diff --git a/go.mod b/go.mod
index 9342b08ac..8551c468d 100644
--- a/go.mod
+++ b/go.mod
@@ -10,7 +10,7 @@ require (
github.com/kubernetes-csi/csi-lib-utils v0.22.0
golang.org/x/oauth2 v0.27.0 // indirect
golang.org/x/term v0.31.0 // indirect
- google.golang.org/grpc v1.69.2
+ google.golang.org/grpc v1.72.1
k8s.io/api v0.33.1
k8s.io/apimachinery v0.33.1
k8s.io/apiserver v0.33.0
@@ -25,8 +25,8 @@ require (
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
- github.com/emicklei/go-restful/v3 v3.12.1 // indirect
- github.com/fxamacker/cbor/v2 v2.7.0 // indirect
+ github.com/emicklei/go-restful/v3 v3.12.2 // indirect
+ github.com/fxamacker/cbor/v2 v2.8.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-logr/zapr v1.3.0 // indirect
@@ -34,58 +34,61 @@ require (
github.com/go-openapi/jsonreference v0.21.0 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
- github.com/google/gnostic-models v0.6.9 // indirect
+ github.com/google/gnostic-models v0.7.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.9.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
- github.com/modern-go/reflect2 v1.0.2 // indirect
+ github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pkg/errors v0.9.1 // indirect
+ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_golang v1.22.0 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.62.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
- github.com/spf13/cobra v1.8.1 // indirect
- github.com/spf13/pflag v1.0.5 // indirect
+ github.com/spf13/cobra v1.9.1 // indirect
+ github.com/spf13/pflag v1.0.6 // indirect
github.com/x448/float16 v0.8.4 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect
- go.opentelemetry.io/otel v1.33.0 // indirect
- go.opentelemetry.io/otel/metric v1.33.0 // indirect
- go.opentelemetry.io/otel/trace v1.33.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect
+ go.opentelemetry.io/otel v1.35.0 // indirect
+ go.opentelemetry.io/otel/metric v1.35.0 // indirect
+ go.opentelemetry.io/otel/trace v1.35.0 // indirect
go.uber.org/automaxprocs v1.6.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
+ go.yaml.in/yaml/v2 v2.4.2 // indirect
+ go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/net v0.39.0 // indirect
golang.org/x/sys v0.32.0 // indirect
golang.org/x/text v0.24.0 // indirect
golang.org/x/time v0.9.0 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20250106144421-5f5ef82da422 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect
google.golang.org/protobuf v1.36.5 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
- k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
- k8s.io/utils v0.0.0-20241210054802-24370beab758 // indirect
+ k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect
+ k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
- sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
- sigs.k8s.io/yaml v1.4.0 // indirect
+ sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
+ sigs.k8s.io/yaml v1.5.0 // indirect
)
-replace k8s.io/api => k8s.io/api v0.33.0
+replace k8s.io/api => github.com/carlory/kubernetes/staging/src/k8s.io/api v0.0.0-20250724034424-78a2afa4ba07
-replace k8s.io/apimachinery => k8s.io/apimachinery v0.33.0
+replace k8s.io/apimachinery => github.com/carlory/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20250724034424-78a2afa4ba07
-replace k8s.io/apiserver => k8s.io/apiserver v0.33.0
+replace k8s.io/apiserver => github.com/carlory/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20250724034424-78a2afa4ba07
-replace k8s.io/client-go => k8s.io/client-go v0.33.0
+replace k8s.io/client-go => github.com/carlory/kubernetes/staging/src/k8s.io/client-go v0.0.0-20250724034424-78a2afa4ba07
-replace k8s.io/component-base => k8s.io/component-base v0.33.0
+replace k8s.io/component-base => github.com/carlory/kubernetes/staging/src/k8s.io/component-base v0.0.0-20250724034424-78a2afa4ba07
-replace k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.33.0
+replace k8s.io/csi-translation-lib => github.com/carlory/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20250724034424-78a2afa4ba07
-replace k8s.io/kms => k8s.io/kms v0.33.0
+replace k8s.io/kms => github.com/carlory/kubernetes/staging/src/k8s.io/kms v0.0.0-20250724034424-78a2afa4ba07
diff --git a/go.sum b/go.sum
index 469ca099d..43ef44a83 100644
--- a/go.sum
+++ b/go.sum
@@ -2,19 +2,31 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
+github.com/carlory/kubernetes/staging/src/k8s.io/api v0.0.0-20250724034424-78a2afa4ba07 h1:+SWXNvZXPdFMbs5Qgfu7f/jXH4xRcXTcJE6S48Q1gqs=
+github.com/carlory/kubernetes/staging/src/k8s.io/api v0.0.0-20250724034424-78a2afa4ba07/go.mod h1:oAbiREintv4AjIOJp24Y0D6c/S0VsL2wSL8/KURHyLg=
+github.com/carlory/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20250724034424-78a2afa4ba07 h1:2L65+4dw2E+NZX98QSP5/Fj3nDI2mqm5oAlSLaeyKkw=
+github.com/carlory/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20250724034424-78a2afa4ba07/go.mod h1:v1p1Jsze3IHLy5gU17yVqR2qLO7jgYeX6mw3HZy2AEU=
+github.com/carlory/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20250724034424-78a2afa4ba07 h1:8FHynjPkes2RoPCD6Q5upuCGj3MB7zQS/9g59/stiRg=
+github.com/carlory/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20250724034424-78a2afa4ba07/go.mod h1:cGPHna86dnfrTohR4LCaATxrnyhZJOY+eZHJ1P8WS5I=
+github.com/carlory/kubernetes/staging/src/k8s.io/client-go v0.0.0-20250724034424-78a2afa4ba07 h1:yXh0ldNEOt6mGyVwvxBGniDTu+zdSZNqc/EGvM1GANI=
+github.com/carlory/kubernetes/staging/src/k8s.io/client-go v0.0.0-20250724034424-78a2afa4ba07/go.mod h1:8D4lk3MNioSJsWCThKJdKnMBLOkSBnI++8BV8LkI3yg=
+github.com/carlory/kubernetes/staging/src/k8s.io/component-base v0.0.0-20250724034424-78a2afa4ba07 h1:pVofdQwH6pDTz6gOcW0/qwVQzjHJ5k90VgBbNIOzBUE=
+github.com/carlory/kubernetes/staging/src/k8s.io/component-base v0.0.0-20250724034424-78a2afa4ba07/go.mod h1:CFN5On0NTmC1kK3+WBbLPOl6ctucCEXS8x/z3AtLFuw=
+github.com/carlory/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20250724034424-78a2afa4ba07 h1:GbKQijt9MO5r89wOwSUnBC/wp4Px1E7N6K7cydWEBxc=
+github.com/carlory/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20250724034424-78a2afa4ba07/go.mod h1:0RY+aNg4v6euN3kljumZMg5EJSYaFKYohK7tpOmFCcc=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/container-storage-interface/spec v1.11.0 h1:H/YKTOeUZwHtyPOr9raR+HgFmGluGCklulxDYxSdVNM=
github.com/container-storage-interface/spec v1.11.0/go.mod h1:DtUvaQszPml1YJfIK7c00mlv6/g4wNMLanLgiUbKFRI=
-github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU=
-github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
-github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
-github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
+github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
+github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/fxamacker/cbor/v2 v2.8.0 h1:fFtUGXUzXPHTIUdne5+zzMPTfffl3RD5qYnkY40vtxU=
+github.com/fxamacker/cbor/v2 v2.8.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
@@ -34,9 +46,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
-github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
-github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
+github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -67,8 +78,9 @@ github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUt
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
+github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
@@ -93,10 +105,10 @@ github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoG
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
-github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
-github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
-github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
+github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
+github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
+github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
@@ -109,18 +121,18 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0=
-go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw=
-go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I=
-go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ=
-go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M=
-go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM=
-go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM=
-go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
-go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
-go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s=
-go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM=
+go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
+go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=
+go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M=
+go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE=
+go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A=
+go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU=
+go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk=
+go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w=
+go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs=
+go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc=
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@@ -129,6 +141,10 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
+go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
+go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
+go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -168,10 +184,10 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250106144421-5f5ef82da422 h1:3UsHvIr4Wc2aW4brOaSCmcxh9ksica6fHEr8P1XhkYw=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250106144421-5f5ef82da422/go.mod h1:3ENsm/5D1mzDyhpzeRi1NR784I0BcofWBoSc5QqqMK4=
-google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU=
-google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb h1:TLPQVbx1GJ8VKZxz52VAxl1EBgKXXbTiU9Fc5fZeLn4=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I=
+google.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA=
+google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -183,30 +199,17 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-k8s.io/api v0.33.0 h1:yTgZVn1XEe6opVpP1FylmNrIFWuDqe2H0V8CT5gxfIU=
-k8s.io/api v0.33.0/go.mod h1:CTO61ECK/KU7haa3qq8sarQ0biLq2ju405IZAd9zsiM=
-k8s.io/apimachinery v0.33.0 h1:1a6kHrJxb2hs4t8EE5wuR/WxKDwGN1FKH3JvDtA0CIQ=
-k8s.io/apimachinery v0.33.0/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
-k8s.io/apiserver v0.33.0 h1:QqcM6c+qEEjkOODHppFXRiw/cE2zP85704YrQ9YaBbc=
-k8s.io/apiserver v0.33.0/go.mod h1:EixYOit0YTxt8zrO2kBU7ixAtxFce9gKGq367nFmqI8=
-k8s.io/client-go v0.33.0 h1:UASR0sAYVUzs2kYuKn/ZakZlcs2bEHaizrrHUZg0G98=
-k8s.io/client-go v0.33.0/go.mod h1:kGkd+l/gNGg8GYWAPr0xF1rRKvVWvzh9vmZAMXtaKOg=
-k8s.io/component-base v0.33.0 h1:Ot4PyJI+0JAD9covDhwLp9UNkUja209OzsJ4FzScBNk=
-k8s.io/component-base v0.33.0/go.mod h1:aXYZLbw3kihdkOPMDhWbjGCO6sg+luw554KP51t8qCU=
-k8s.io/csi-translation-lib v0.33.0 h1:kW3xVPCTXmHmK5v/8PEVZCUZSdNRndiQat0SbN31qEM=
-k8s.io/csi-translation-lib v0.33.0/go.mod h1:Ldx85t1WxFStKQ2p5Xaimv39IkTc7/m8yNMkt8MlyoQ=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
-k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
-k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0=
-k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA=
+k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
+k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
+k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
-sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
-sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
-sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
-sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
-sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
+sigs.k8s.io/yaml v1.5.0 h1:M10b2U7aEUY6hRtU870n2VTPgR5RZiL/I6Lcc2F4NUQ=
+sigs.k8s.io/yaml v1.5.0/go.mod h1:wZs27Rbxoai4C0f8/9urLZtZtF3avA3gKvGyPdDqTO4=
diff --git a/pkg/features/features.go b/pkg/features/features.go
index ba5f7e4bd..4c5ac2f48 100644
--- a/pkg/features/features.go
+++ b/pkg/features/features.go
@@ -37,6 +37,7 @@ const (
// kep: https://kep.k8s.io/3751
// alpha: v1.29
// beta: v1.31
+ // GA: v1.34
//
// Pass VolumeAttributesClass parameters to supporting CSI drivers during ModifyVolume
VolumeAttributesClass featuregate.Feature = "VolumeAttributesClass"
@@ -49,5 +50,5 @@ func init() {
var defaultResizerFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
AnnotateFsResize: {Default: false, PreRelease: featuregate.Alpha},
RecoverVolumeExpansionFailure: {Default: true, PreRelease: featuregate.Beta},
- VolumeAttributesClass: {Default: false, PreRelease: featuregate.Beta},
+ VolumeAttributesClass: {Default: true, PreRelease: featuregate.GA},
}
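For context, a minimal, hypothetical sketch (not the resizer's actual wiring) of registering and checking this gate through k8s.io/apiserver's default feature gate; the spec mirrors the GA promotion above, where the gate is now enabled by default:

```go
// Illustrative only: register the post-promotion spec and read it back.
package main

import (
	"fmt"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/component-base/featuregate"
)

// Mirrors the gate name declared in pkg/features/features.go.
const VolumeAttributesClass featuregate.Feature = "VolumeAttributesClass"

func main() {
	// GA spec from the change above: enabled by default.
	_ = utilfeature.DefaultMutableFeatureGate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		VolumeAttributesClass: {Default: true, PreRelease: featuregate.GA},
	})

	if utilfeature.DefaultFeatureGate.Enabled(VolumeAttributesClass) {
		fmt.Println("VolumeAttributesClass path active by default")
	}
}
```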
diff --git a/pkg/modifycontroller/controller.go b/pkg/modifycontroller/controller.go
index 1895a3553..cbe5520e9 100644
--- a/pkg/modifycontroller/controller.go
+++ b/pkg/modifycontroller/controller.go
@@ -33,7 +33,7 @@ import (
"k8s.io/client-go/kubernetes/scheme"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
corelisters "k8s.io/client-go/listers/core/v1"
- storagev1beta1listers "k8s.io/client-go/listers/storage/v1beta1"
+ storagev1listers "k8s.io/client-go/listers/storage/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
@@ -57,7 +57,7 @@ type modifyController struct {
pvListerSynced cache.InformerSynced
pvcLister corelisters.PersistentVolumeClaimLister
pvcListerSynced cache.InformerSynced
- vacLister storagev1beta1listers.VolumeAttributesClassLister
+ vacLister storagev1listers.VolumeAttributesClassLister
vacListerSynced cache.InformerSynced
extraModifyMetadata bool
// the key of the map is {PVC_NAMESPACE}/{PVC_NAME}
@@ -78,7 +78,7 @@ func NewModifyController(
pvcRateLimiter workqueue.TypedRateLimiter[string]) ModifyController {
pvInformer := informerFactory.Core().V1().PersistentVolumes()
pvcInformer := informerFactory.Core().V1().PersistentVolumeClaims()
- vacInformer := informerFactory.Storage().V1beta1().VolumeAttributesClasses()
+ vacInformer := informerFactory.Storage().V1().VolumeAttributesClasses()
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartStructuredLogging(0)
eventBroadcaster.StartRecordingToSink(&corev1.EventSinkImpl{Interface: kubeClient.CoreV1().Events(v1.NamespaceAll)})
diff --git a/pkg/modifycontroller/controller_test.go b/pkg/modifycontroller/controller_test.go
index 3acd842e1..73e0dce96 100644
--- a/pkg/modifycontroller/controller_test.go
+++ b/pkg/modifycontroller/controller_test.go
@@ -14,7 +14,7 @@ import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
v1 "k8s.io/api/core/v1"
- storagev1beta1 "k8s.io/api/storage/v1beta1"
+ storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilfeature "k8s.io/apiserver/pkg/util/feature"
@@ -331,7 +331,7 @@ func setupFakeK8sEnvironment(t *testing.T, client *csi.MockClient, initialObject
kubeClient, informerFactory := fakeK8s(initialObjects)
pvInformer := informerFactory.Core().V1().PersistentVolumes()
pvcInformer := informerFactory.Core().V1().PersistentVolumeClaims()
- vacInformer := informerFactory.Storage().V1beta1().VolumeAttributesClasses()
+ vacInformer := informerFactory.Storage().V1().VolumeAttributesClasses()
driverName, _ := client.GetDriverName(context.TODO())
@@ -358,7 +358,7 @@ func setupFakeK8sEnvironment(t *testing.T, client *csi.MockClient, initialObject
pvInformer.Informer().GetStore().Add(obj)
case *v1.PersistentVolumeClaim:
pvcInformer.Informer().GetStore().Add(obj)
- case *storagev1beta1.VolumeAttributesClass:
+ case *storagev1.VolumeAttributesClass:
vacInformer.Informer().GetStore().Add(obj)
default:
t.Fatalf("Test %s: Unknown initalObject type: %+v", t.Name(), obj)
diff --git a/pkg/modifycontroller/modify_status_test.go b/pkg/modifycontroller/modify_status_test.go
index 50c7de27a..cb9469890 100644
--- a/pkg/modifycontroller/modify_status_test.go
+++ b/pkg/modifycontroller/modify_status_test.go
@@ -14,7 +14,7 @@ import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
v1 "k8s.io/api/core/v1"
- storagev1beta1 "k8s.io/api/storage/v1beta1"
+ storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -306,7 +306,7 @@ func TestRemovePVCFromModifyVolumeUncertainCache(t *testing.T) {
pvInformer := informerFactory.Core().V1().PersistentVolumes()
pvcInformer := informerFactory.Core().V1().PersistentVolumeClaims()
podInformer := informerFactory.Core().V1().Pods()
- vacInformer := informerFactory.Storage().V1beta1().VolumeAttributesClasses()
+ vacInformer := informerFactory.Storage().V1().VolumeAttributesClasses()
csiModifier, err := modifier.NewModifierFromClient(client, 15*time.Second, kubeClient, informerFactory, false, driverName)
if err != nil {
@@ -335,7 +335,7 @@ func TestRemovePVCFromModifyVolumeUncertainCache(t *testing.T) {
pvcInformer.Informer().GetStore().Add(obj)
case *v1.Pod:
podInformer.Informer().GetStore().Add(obj)
- case *storagev1beta1.VolumeAttributesClass:
+ case *storagev1.VolumeAttributesClass:
vacInformer.Informer().GetStore().Add(obj)
default:
t.Fatalf("Test %s: Unknown initalObject type: %+v", test.name, obj)
diff --git a/pkg/modifycontroller/modify_volume.go b/pkg/modifycontroller/modify_volume.go
index 7523ab624..4a8a691a9 100644
--- a/pkg/modifycontroller/modify_volume.go
+++ b/pkg/modifycontroller/modify_volume.go
@@ -25,7 +25,7 @@ import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
v1 "k8s.io/api/core/v1"
- storagev1beta1 "k8s.io/api/storage/v1beta1"
+ storagev1 "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
@@ -112,7 +112,7 @@ func (ctrl *modifyController) validateVACAndModifyVolumeWithTarget(
func (ctrl *modifyController) controllerModifyVolumeWithTarget(
pvc *v1.PersistentVolumeClaim,
pv *v1.PersistentVolume,
- vacObj *storagev1beta1.VolumeAttributesClass,
+ vacObj *storagev1.VolumeAttributesClass,
pvcSpecVacName *string) (*v1.PersistentVolumeClaim, *v1.PersistentVolume, error, bool) {
var err error
pvc, pv, err = ctrl.callModifyVolumeOnPlugin(pvc, pv, vacObj)
@@ -160,7 +160,7 @@ func (ctrl *modifyController) controllerModifyVolumeWithTarget(
func (ctrl *modifyController) callModifyVolumeOnPlugin(
pvc *v1.PersistentVolumeClaim,
pv *v1.PersistentVolume,
- vac *storagev1beta1.VolumeAttributesClass) (*v1.PersistentVolumeClaim, *v1.PersistentVolume, error) {
+ vac *storagev1.VolumeAttributesClass) (*v1.PersistentVolumeClaim, *v1.PersistentVolume, error) {
if ctrl.extraModifyMetadata {
vac.Parameters[pvcNameKey] = pvc.GetName()
vac.Parameters[pvcNamespaceKey] = pvc.GetNamespace()
diff --git a/pkg/modifycontroller/modify_volume_test.go b/pkg/modifycontroller/modify_volume_test.go
index baffbe3ec..ff3892c5b 100644
--- a/pkg/modifycontroller/modify_volume_test.go
+++ b/pkg/modifycontroller/modify_volume_test.go
@@ -6,7 +6,7 @@ import (
"github.com/google/go-cmp/cmp"
"github.com/kubernetes-csi/external-resizer/pkg/csi"
v1 "k8s.io/api/core/v1"
- storagev1beta1 "k8s.io/api/storage/v1beta1"
+ storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -16,13 +16,13 @@ import (
)
var (
- testVacObject = &storagev1beta1.VolumeAttributesClass{
+ testVacObject = &storagev1.VolumeAttributesClass{
ObjectMeta: metav1.ObjectMeta{Name: testVac},
DriverName: testDriverName,
Parameters: map[string]string{"iops": "3000"},
}
- targetVacObject = &storagev1beta1.VolumeAttributesClass{
+ targetVacObject = &storagev1.VolumeAttributesClass{
ObjectMeta: metav1.ObjectMeta{Name: targetVac},
DriverName: testDriverName,
Parameters: map[string]string{
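As a consumer-side illustration of the v1beta1 → v1 move, here is a small, hypothetical sketch of seeding a VolumeAttributesClass into an informer store and reading it back through the storage/v1 lister, much like the test wiring above. The object name and driver name are invented, and it assumes the client-go version vendored by this change, which exposes the storage/v1 VolumeAttributesClass informer:

```go
// Illustrative only: fake client + storage/v1 informer, mirroring the test setup.
package main

import (
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	client := fake.NewSimpleClientset()
	factory := informers.NewSharedInformerFactory(client, 0)
	vacInformer := factory.Storage().V1().VolumeAttributesClasses()

	vac := &storagev1.VolumeAttributesClass{
		ObjectMeta: metav1.ObjectMeta{Name: "gold"}, // invented name
		DriverName: "hostpath.csi.k8s.io",           // invented driver
		Parameters: map[string]string{"iops": "3000"},
	}
	_ = vacInformer.Informer().GetStore().Add(vac) // seed the store directly, as the tests do

	got, err := vacInformer.Lister().Get("gold")
	if err != nil {
		panic(err)
	}
	fmt.Println("driver:", got.DriverName)
}
```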
diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md
index 92b78048e..6f24dfff5 100644
--- a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md
+++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md
@@ -1,5 +1,8 @@
# Change history of go-restful
+## [v3.12.2] - 2025-02-21
+
+- allow empty payloads in post,put,patch, issue #580 ( thanks @liggitt, Jordan Liggitt)
## [v3.12.1] - 2024-05-28
@@ -18,7 +21,7 @@
- fix by restoring custom JSON handler functions (Mike Beaumont #540)
-## [v3.12.0] - 2023-08-19
+## [v3.11.0] - 2023-08-19
- restored behavior as <= v3.9.0 with option to change path strategy using TrimRightSlashEnabled.
diff --git a/vendor/github.com/emicklei/go-restful/v3/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md
index 7234604e4..3fb40d198 100644
--- a/vendor/github.com/emicklei/go-restful/v3/README.md
+++ b/vendor/github.com/emicklei/go-restful/v3/README.md
@@ -3,7 +3,7 @@ go-restful
package for building REST-style Web Services using Google Go
[](https://goreportcard.com/report/github.com/emicklei/go-restful)
-[](https://pkg.go.dev/github.com/emicklei/go-restful)
+[](https://pkg.go.dev/github.com/emicklei/go-restful/v3)
[](https://codecov.io/gh/emicklei/go-restful)
- [Code examples use v3](https://github.com/emicklei/go-restful/tree/v3/examples)
diff --git a/vendor/github.com/emicklei/go-restful/v3/jsr311.go b/vendor/github.com/emicklei/go-restful/v3/jsr311.go
index a9b3faaa8..7f04bd905 100644
--- a/vendor/github.com/emicklei/go-restful/v3/jsr311.go
+++ b/vendor/github.com/emicklei/go-restful/v3/jsr311.go
@@ -65,7 +65,7 @@ func (RouterJSR311) extractParams(pathExpr *pathExpression, matches []string) ma
return params
}
-// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
+// https://download.oracle.com/otndocs/jcp/jaxrs-1.1-mrel-eval-oth-JSpec/
func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) {
candidates := make([]*Route, 0, 8)
for i, each := range routes {
@@ -126,9 +126,7 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R
if trace {
traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(previous), contentType)
}
- if httpRequest.ContentLength > 0 {
- return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type")
- }
+ return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type")
}
// accept
@@ -151,20 +149,9 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R
for _, candidate := range previous {
available = append(available, candidate.Produces...)
}
- // if POST,PUT,PATCH without body
- method, length := httpRequest.Method, httpRequest.Header.Get("Content-Length")
- if (method == http.MethodPost ||
- method == http.MethodPut ||
- method == http.MethodPatch) && (length == "" || length == "0") {
- return nil, NewError(
- http.StatusUnsupportedMediaType,
- fmt.Sprintf("415: Unsupported Media Type\n\nAvailable representations: %s", strings.Join(available, ", ")),
- )
- }
return nil, NewError(
http.StatusNotAcceptable,
- fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", ")),
- )
+ fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", ")))
}
// return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil
return candidates[0], nil
diff --git a/vendor/github.com/emicklei/go-restful/v3/route.go b/vendor/github.com/emicklei/go-restful/v3/route.go
index 306c44be7..a2056e2ac 100644
--- a/vendor/github.com/emicklei/go-restful/v3/route.go
+++ b/vendor/github.com/emicklei/go-restful/v3/route.go
@@ -111,6 +111,8 @@ func (r Route) matchesAccept(mimeTypesWithQuality string) bool {
}
// Return whether this Route can consume content with a type specified by mimeTypes (can be empty).
+// If the route does not specify Consumes then return true (*/*).
+// If no content type is set then return true for GET,HEAD,OPTIONS,DELETE and TRACE.
func (r Route) matchesContentType(mimeTypes string) bool {
if len(r.Consumes) == 0 {
diff --git a/vendor/github.com/fxamacker/cbor/v2/README.md b/vendor/github.com/fxamacker/cbor/v2/README.md
index af0a79507..da9f9e6f0 100644
--- a/vendor/github.com/fxamacker/cbor/v2/README.md
+++ b/vendor/github.com/fxamacker/cbor/v2/README.md
@@ -1,6 +1,4 @@
-# CBOR Codec in Go
-
-
+
CBOR Codec 
[fxamacker/cbor](https://github.com/fxamacker/cbor) is a library for encoding and decoding [CBOR](https://www.rfc-editor.org/info/std94) and [CBOR Sequences](https://www.rfc-editor.org/rfc/rfc8742.html).
@@ -8,23 +6,26 @@ CBOR is a [trusted alternative](https://www.rfc-editor.org/rfc/rfc8949.html#name
`fxamacker/cbor` is used in projects by Arm Ltd., Cisco, EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Microsoft, Mozilla, Oasis Protocol, Tailscale, Teleport, [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor).
-See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `cbor.MarshalToBuffer()` and `UserBufferEncMode` accepts user-specified buffer.
+See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `MarshalToBuffer` and `UserBufferEncMode` accepts user-specified buffer.
## fxamacker/cbor
[](https://github.com/fxamacker/cbor/actions?query=workflow%3Aci)
-[](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A596%25%22)
+[](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A597%25%22)
[](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml)
[](#fuzzing-and-code-coverage)
[](https://goreportcard.com/report/github.com/fxamacker/cbor)
+[](https://github.com/fxamacker/cbor#fuzzing-and-code-coverage)
`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)).
Features include full support for CBOR tags, [Core Deterministic Encoding](https://www.rfc-editor.org/rfc/rfc8949.html#name-core-deterministic-encoding), duplicate map key detection, etc.
+API is mostly same as `encoding/json`, plus interfaces that simplify concurrency and CBOR options.
+
Design balances trade-offs between security, speed, concurrency, encoded data size, usability, etc.
-Highlights
+ 🔎 Highlights
__🚀 Speed__
@@ -38,7 +39,7 @@ Codec passed multiple confidential security assessments in 2022. No vulnerabili
__🗜️ Data Size__
-Struct tags (`toarray`, `keyasint`, `omitempty`) automatically reduce size of encoded structs. Encoding optionally shrinks float64β32β16 when values fit.
+Struct tag options (`toarray`, `keyasint`, `omitempty`, `omitzero`) automatically reduce size of encoded structs. Encoding optionally shrinks float64β32β16 when values fit.
__:jigsaw: Usability__
@@ -58,164 +59,201 @@ Features include CBOR [extension points](https://www.rfc-editor.org/rfc/rfc8949.
`fxamacker/cbor` has configurable limits, etc. that defend against malicious CBOR data.
-By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security).
-
-Example decoding with encoding/gob 💥 fatal error (out of memory)
-
-```Go
-// Example of encoding/gob having "fatal error: runtime: out of memory"
-// while decoding 181 bytes.
-package main
-import (
- "bytes"
- "encoding/gob"
- "encoding/hex"
- "fmt"
-)
-
-// Example data is from https://github.com/golang/go/issues/24446
-// (shortened to 181 bytes).
-const data = "4dffb503010102303001ff30000109010130010800010130010800010130" +
- "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" +
- "860001013001ff860001013001ffb80000001eff850401010e3030303030" +
- "30303030303030303001ff3000010c0104000016ffb70201010830303030" +
- "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" +
- "303030303030303030303030303030303030303030303030303030303030" +
- "30"
-
-type X struct {
- J *X
- K map[string]int
-}
-
-func main() {
- raw, _ := hex.DecodeString(data)
- decoder := gob.NewDecoder(bytes.NewReader(raw))
-
- var x X
- decoder.Decode(&x) // fatal error: runtime: out of memory
- fmt.Println("Decoding finished.")
-}
-```
-
-
-
-
-
-`fxamacker/cbor` is fast at rejecting malformed CBOR data. E.g. attempts to
-decode 10 bytes of malicious CBOR data to `[]byte` (with default settings):
-
-| Codec | Speed (ns/op) | Memory | Allocs |
-| :---- | ------------: | -----: | -----: |
-| fxamacker/cbor 2.5.0 | 44 ± 5% | 32 B/op | 2 allocs/op |
-| ugorji/go 1.2.11 | 5353261 ± 4% | 67111321 B/op | 13 allocs/op |
-
-Benchmark details
-
-Latest comparison used:
-- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
-- go1.19.10, linux/amd64, i5-13600K (disabled all e-cores, DDR4 @2933)
-- go test -bench=. -benchmem -count=20
-
-#### Prior comparisons
-
-| Codec | Speed (ns/op) | Memory | Allocs |
-| :---- | ------------: | -----: | -----: |
-| fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op |
-| fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op |
-| ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op |
-| ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate |
-
-- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
-- go1.19.6, linux/amd64, i5-13600K (DDR4)
-- go test -bench=. -benchmem -count=20
-
-
-
-
-
-### Smaller Encodings with Struct Tags
-
-Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs.
-
-Example encoding 3-level nested Go struct to 1 byte CBOR
-
-https://go.dev/play/p/YxwvfPdFQG2
-
-```Go
-// Example encoding nested struct (with omitempty tag)
-// - encoding/json: 18 byte JSON
-// - fxamacker/cbor: 1 byte CBOR
-package main
-
-import (
- "encoding/hex"
- "encoding/json"
- "fmt"
-
- "github.com/fxamacker/cbor/v2"
-)
-
-type GrandChild struct {
- Quux int `json:",omitempty"`
-}
-
-type Child struct {
- Baz int `json:",omitempty"`
- Qux GrandChild `json:",omitempty"`
-}
-
-type Parent struct {
- Foo Child `json:",omitempty"`
- Bar int `json:",omitempty"`
-}
-
-func cb() {
- results, _ := cbor.Marshal(Parent{})
- fmt.Println("hex(CBOR): " + hex.EncodeToString(results))
-
- text, _ := cbor.Diagnose(results) // Diagnostic Notation
- fmt.Println("DN: " + text)
-}
-
-func js() {
- results, _ := json.Marshal(Parent{})
- fmt.Println("hex(JSON): " + hex.EncodeToString(results))
-
- text := string(results) // JSON
- fmt.Println("JSON: " + text)
-}
-
-func main() {
- cb()
- fmt.Println("-------------")
- js()
-}
-```
-
-Output (DN is Diagnostic Notation):
-```
-hex(CBOR): a0
-DN: {}
--------------
-hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d
-JSON: {"Foo":{"Qux":{}}}
-```
-
-
-
-
-
-Example using different struct tags together:
+Notably, `fxamacker/cbor` is fast at rejecting malformed CBOR data.
+
+> [!NOTE]
+> Benchmarks rejecting 10 bytes of malicious CBOR data decoding to `[]byte`:
+>
+> | Codec | Speed (ns/op) | Memory | Allocs |
+> | :---- | ------------: | -----: | -----: |
+> | fxamacker/cbor 2.7.0 | 47 ± 7% | 32 B/op | 2 allocs/op |
+> | ugorji/go 1.2.12 | 5878187 ± 3% | 67111556 B/op | 13 allocs/op |
+>
+> Faster hardware (overclocked DDR4 or DDR5) can reduce speed difference.
+>
+> 🔎 Benchmark details
+>
+> Latest comparison for decoding CBOR data to Go `[]byte`:
+> - Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
+> - go1.22.7, linux/amd64, i5-13600K (DDR4-2933, disabled e-cores)
+> - go test -bench=. -benchmem -count=20
+>
+> #### Prior comparisons
+>
+> | Codec | Speed (ns/op) | Memory | Allocs |
+> | :---- | ------------: | -----: | -----: |
+> | fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op |
+> | fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op |
+> | ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op |
+> | ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate |
+>
+> - Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
+> - go1.19.6, linux/amd64, i5-13600K (DDR4)
+> - go test -bench=. -benchmem -count=20
+>
+>
+
+In contrast, some codecs can crash or use excessive resources while decoding bad data.
+
+> [!WARNING]
+> Go's `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security).
+>
+> 🔎 gob fatal error (out of memory) 💥 decoding 181 bytes
+>
+> ```Go
+> // Example of encoding/gob having "fatal error: runtime: out of memory"
+> // while decoding 181 bytes (all Go versions as of Dec. 8, 2024).
+> package main
+> import (
+> "bytes"
+> "encoding/gob"
+> "encoding/hex"
+> "fmt"
+> )
+>
+> // Example data is from https://github.com/golang/go/issues/24446
+> // (shortened to 181 bytes).
+> const data = "4dffb503010102303001ff30000109010130010800010130010800010130" +
+> "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" +
+> "860001013001ff860001013001ffb80000001eff850401010e3030303030" +
+> "30303030303030303001ff3000010c0104000016ffb70201010830303030" +
+> "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" +
+> "303030303030303030303030303030303030303030303030303030303030" +
+> "30"
+>
+> type X struct {
+> J *X
+> K map[string]int
+> }
+>
+> func main() {
+> raw, _ := hex.DecodeString(data)
+> decoder := gob.NewDecoder(bytes.NewReader(raw))
+>
+> var x X
+> decoder.Decode(&x) // fatal error: runtime: out of memory
+> fmt.Println("Decoding finished.")
+> }
+> ```
+>
+>
+>
+
+### Smaller Encodings with Struct Tag Options
+
+Struct tags automatically reduce encoded size of structs and improve speed.
+
+We can write less code by using struct tag options:
+- `toarray`: encode without field names (decode back to original struct)
+- `keyasint`: encode field names as integers (decode back to original struct)
+- `omitempty`: omit empty fields when encoding
+- `omitzero`: omit zero-value fields when encoding

-API is mostly same as `encoding/json`, plus interfaces that simplify concurrency for CBOR options.
+> [!NOTE]
+> `fxamacker/cbor` can encode a 3-level nested Go struct to 1 byte!
+> - `encoding/json`: 18 bytes of JSON
+> - `fxamacker/cbor`: 1 byte of CBOR
+>
+> 🔎 Encoding 3-level nested Go struct with omitempty
+>
+> https://go.dev/play/p/YxwvfPdFQG2
+>
+> ```Go
+> // Example encoding nested struct (with omitempty tag)
+> // - encoding/json: 18 byte JSON
+> // - fxamacker/cbor: 1 byte CBOR
+>
+> package main
+>
+> import (
+> "encoding/hex"
+> "encoding/json"
+> "fmt"
+>
+> "github.com/fxamacker/cbor/v2"
+> )
+>
+> type GrandChild struct {
+> Quux int `json:",omitempty"`
+> }
+>
+> type Child struct {
+> Baz int `json:",omitempty"`
+> Qux GrandChild `json:",omitempty"`
+> }
+>
+> type Parent struct {
+> Foo Child `json:",omitempty"`
+> Bar int `json:",omitempty"`
+> }
+>
+> func cb() {
+> results, _ := cbor.Marshal(Parent{})
+> fmt.Println("hex(CBOR): " + hex.EncodeToString(results))
+>
+> text, _ := cbor.Diagnose(results) // Diagnostic Notation
+> fmt.Println("DN: " + text)
+> }
+>
+> func js() {
+> results, _ := json.Marshal(Parent{})
+> fmt.Println("hex(JSON): " + hex.EncodeToString(results))
+>
+> text := string(results) // JSON
+> fmt.Println("JSON: " + text)
+> }
+>
+> func main() {
+> cb()
+> fmt.Println("-------------")
+> js()
+> }
+> ```
+>
+> Output (DN is Diagnostic Notation):
+> ```
+> hex(CBOR): a0
+> DN: {}
+> -------------
+> hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d
+> JSON: {"Foo":{"Qux":{}}}
+> ```
+>
+>
+
## Quick Start
__Install__: `go get github.com/fxamacker/cbor/v2` and `import "github.com/fxamacker/cbor/v2"`.
+> [!TIP]
+>
+> Tinygo users can try beta/experimental branch [feature/cbor-tinygo-beta](https://github.com/fxamacker/cbor/tree/feature/cbor-tinygo-beta).
+>
+> 🔎 More about tinygo feature branch
+>
+> ### Tinygo
+>
+> Branch [feature/cbor-tinygo-beta](https://github.com/fxamacker/cbor/tree/feature/cbor-tinygo-beta) is based on fxamacker/cbor v2.7.0 and it can be compiled using tinygo v0.33 (also compiles with golang/go).
+>
+> It passes unit tests (with both go1.22 and tinygo v0.33) and is considered beta/experimental for tinygo.
+>
+> :warning: The `feature/cbor-tinygo-beta` branch does not get fuzz tested yet.
+>
+> Changes in this feature branch only affect tinygo compiled software. Summary of changes:
+> - default `DecOptions.MaxNestedLevels` is reduced to 16 (was 32). User can specify higher limit but 24+ crashes tests when compiled with tinygo v0.33.
+> - disabled decoding CBOR tag data to Go interface because tinygo v0.33 is missing needed feature.
+> - encoding error message can be different when encoding function type.
+>
+> Related tinygo issues:
+> - https://github.com/tinygo-org/tinygo/issues/4277
+> - https://github.com/tinygo-org/tinygo/issues/4458
+>
+>
+
+
### Key Points
This library can encode and decode CBOR (RFC 8949) and CBOR Sequences (RFC 8742).
@@ -252,16 +290,17 @@ rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v
// DiagnoseFirst translates first CBOR data item to text and returns remaining bytes.
text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to Diagnostic Notation text
-// NOTE: Unmarshal returns ExtraneousDataError if there are remaining bytes,
-// but new funcs UnmarshalFirst and DiagnoseFirst do not.
+// NOTE: Unmarshal() returns ExtraneousDataError if there are remaining bytes, but
+// UnmarshalFirst() and DiagnoseFirst() allow trailing bytes.
```
-__IMPORTANT__: π CBOR settings allow trade-offs between speed, security, encoding size, etc.
-
-- Different CBOR libraries may use different default settings.
-- CBOR-based formats or protocols usually require specific settings.
-
-For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset.
+> [!IMPORTANT]
+> CBOR settings allow trade-offs between speed, security, encoding size, etc.
+>
+> - Different CBOR libraries may use different default settings.
+> - CBOR-based formats or protocols usually require specific settings.
+>
+> For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset.
### Presets
@@ -312,9 +351,9 @@ err = em.MarshalToBuffer(v, &buf) // encode v to provided buf
### Struct Tags
-Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs.
+Struct tag options (`toarray`, `keyasint`, `omitempty`, `omitzero`) reduce encoded size of structs.
-Example encoding 3-level nested Go struct to 1 byte CBOR
+ 🔎 Example encoding 3-level nested Go struct to 1 byte CBOR
https://go.dev/play/p/YxwvfPdFQG2
@@ -382,13 +421,13 @@ JSON: {"Foo":{"Qux":{}}}
-Example using several struct tags
+ 🔎 Example using struct tag options

-Struct tags simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys.
+Struct tag options simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys.
### CBOR Tags
@@ -404,7 +443,7 @@ em, err := opts.EncModeWithSharedTags(ts) // mutable shared CBOR tags
`TagSet` and modes using it are safe for concurrent use. Equivalent API is available for `DecMode`.
-Example using TagSet and TagOptions
+ 🔎 Example using TagSet and TagOptions
```go
// Use signedCWT struct defined in "Decoding CWT" example.
@@ -430,7 +469,7 @@ if err := dm.Unmarshal(data, &v); err != nil {
em, _ := cbor.EncOptions{}.EncModeWithTags(tags)
// Marshal signedCWT with tag number.
-if data, err := cbor.Marshal(v); err != nil {
+if data, err := em.Marshal(v); err != nil {
return err
}
```
@@ -439,7 +478,7 @@ if data, err := cbor.Marshal(v); err != nil {
### Functions and Interfaces
-Functions and interfaces at a glance
+ 🔎 Functions and interfaces at a glance
Common functions with same API as `encoding/json`:
- `Marshal`, `Unmarshal`
@@ -472,11 +511,24 @@ Default limits may need to be increased for systems handling very large data (e.
## Status
-v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality.
+v2.8.0 (March 30, 2025) is a small release primarily to add `omitzero` option to struct field tags and fix bugs. It passed fuzz tests (billions of executions) and is production quality.
+
+v2.8.0 and v2.7.1 fixes these 3 functions (when called directly by user apps) to use same error handling on bad inputs as `cbor.Unmarshal()`:
+- `ByteString.UnmarshalCBOR()`
+- `RawTag.UnmarshalCBOR()`
+- `SimpleValue.UnmarshalCBOR()`
+
+The above 3 `UnmarshalCBOR()` functions were initially created for internal use and are deprecated now, so please use `Unmarshal()` or `UnmarshalFirst()` instead. To preserve backward compatibility, these deprecated functions were added to fuzz tests and will not be removed in v2.
+
+The minimum version of Go required to build:
+- v2.8.0 requires go 1.20.
+- v2.7.1 and older releases require go 1.17.
For more details, see [release notes](https://github.com/fxamacker/cbor/releases).
-### Prior Release
+### Prior Releases
+
+v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality.
[v2.6.0](https://github.com/fxamacker/cbor/releases/tag/v2.6.0) (February 2024) adds important new features, optimizations, and bug fixes. It is especially useful to systems that need to convert data between CBOR and JSON. New options and optimizations improve handling of bignum, integers, maps, and strings.
@@ -489,7 +541,7 @@ See [v2.5.0 release notes](https://github.com/fxamacker/cbor/releases/tag/v2.5.0
See ["Version and API Changes"](https://github.com/fxamacker/cbor#versions-and-api-changes) section for more info about version numbering, etc.
+
+
+## [1.35.0/0.57.0/0.11.0] 2025-03-05
+
+This release is the last to support [Go 1.22].
+The next release will require at least [Go 1.23].
+
+### Added
+
+- Add `ValueFromAttribute` and `KeyValueFromAttribute` in `go.opentelemetry.io/otel/log`. (#6180)
+- Add `EventName` and `SetEventName` to `Record` in `go.opentelemetry.io/otel/log`. (#6187)
+- Add `EventName` to `RecordFactory` in `go.opentelemetry.io/otel/log/logtest`. (#6187)
+- `AssertRecordEqual` in `go.opentelemetry.io/otel/log/logtest` checks `Record.EventName`. (#6187)
+- Add `EventName` and `SetEventName` to `Record` in `go.opentelemetry.io/otel/sdk/log`. (#6193)
+- Add `EventName` to `RecordFactory` in `go.opentelemetry.io/otel/sdk/log/logtest`. (#6193)
+- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#6211)
+- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6211)
+- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` (#6210)
+- The `go.opentelemetry.io/otel/semconv/v1.28.0` package.
+ The package contains semantic conventions from the `v1.28.0` version of the OpenTelemetry Semantic Conventions.
+ See the [migration documentation](./semconv/v1.28.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.27.0`(#6236)
+- The `go.opentelemetry.io/otel/semconv/v1.30.0` package.
+ The package contains semantic conventions from the `v1.30.0` version of the OpenTelemetry Semantic Conventions.
+ See the [migration documentation](./semconv/v1.30.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.28.0`(#6240)
+- Document the pitfalls of using `Resource` as a comparable type.
+ `Resource.Equal` and `Resource.Equivalent` should be used instead. (#6272)
+- Support [Go 1.24]. (#6304)
+- Add `FilterProcessor` and `EnabledParameters` in `go.opentelemetry.io/otel/sdk/log`.
+ It replaces `go.opentelemetry.io/otel/sdk/log/internal/x.FilterProcessor`.
+ Compared to previous version it additionally gives the possibility to filter by resource and instrumentation scope. (#6317)
+
+### Changed
+
+- Update `github.com/prometheus/common` to `v0.62.0`, which changes the `NameValidationScheme` to `NoEscaping`.
+ This allows metrics names to keep original delimiters (e.g. `.`), rather than replacing with underscores.
+ This is controlled by the `Content-Type` header, or can be reverted by setting `NameValidationScheme` to `LegacyValidation` in `github.com/prometheus/common/model`. (#6198)
+
+### Fixes
+
+- Eliminate goroutine leak for the processor returned by `NewSimpleSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace` when `Shutdown` is called and the passed `ctx` is canceled and `SpanExporter.Shutdown` has not returned. (#6368)
+- Eliminate goroutine leak for the processor returned by `NewBatchSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace` when `ForceFlush` is called and the passed `ctx` is canceled and `SpanExporter.Export` has not returned. (#6369)
+
+## [1.34.0/0.56.0/0.10.0] 2025-01-17
+
+### Changed
+
+- Remove the notices from `Logger` to make the whole Logs API user-facing in `go.opentelemetry.io/otel/log`. (#6167)
+
+### Fixed
+
+- Relax minimum Go version to 1.22.0 in various modules. (#6073)
+- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` client is corrected from `otlphttpgrpc` to `otlptracegrpc`. (#6143)
+- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlphttpgrpc` client is corrected from `otlphttphttp` to `otlptracehttp`. (#6143)
+
## [1.33.0/0.55.0/0.9.0/0.0.12] 2024-12-12
### Added
@@ -37,9 +92,6 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/trace`. (#5997)
- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/log`. (#6032)
-
-
-
## [1.32.0/0.54.0/0.8.0/0.0.11] 2024-11-08
### Added
@@ -3185,7 +3237,9 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.33.0...HEAD
+[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.35.0...HEAD
+[1.35.0/0.57.0/0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.35.0
+[1.34.0/0.56.0/0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.34.0
[1.33.0/0.55.0/0.9.0/0.0.12]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.33.0
[1.32.0/0.54.0/0.8.0/0.0.11]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.32.0
[1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0
@@ -3275,6 +3329,7 @@ It contains api and sdk for trace and meter.
+[Go 1.24]: https://go.dev/doc/go1.24
[Go 1.23]: https://go.dev/doc/go1.23
[Go 1.22]: https://go.dev/doc/go1.22
[Go 1.21]: https://go.dev/doc/go1.21
diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
index 22a2e9dbd..7b8af585a 100644
--- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
+++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
@@ -181,6 +181,18 @@ patterns in the spec.
For a deeper discussion, see
[this](https://github.com/open-telemetry/opentelemetry-specification/issues/165).
+## Tests
+
+Each functionality should be covered by tests.
+
+Performance-critical functionality should also be covered by benchmarks.
+
+- Pull requests adding a performance-critical functionality
+should have `go test -bench` output in their description.
+- Pull requests changing a performance-critical functionality
+should have [`benchstat`](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat)
+output in their description.
+
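As an illustration of how that output is typically produced (not part of this patch; the package path and benchmark name below are placeholders):

```sh
# Baseline numbers from the branch you are comparing against (placeholder paths).
go test -run='^$' -bench=BenchmarkSpanStart -count=10 ./sdk/trace/ > old.txt

# Re-run the same benchmarks with your change applied.
go test -run='^$' -bench=BenchmarkSpanStart -count=10 ./sdk/trace/ > new.txt

# Summarize the difference for the pull request description.
go run golang.org/x/perf/cmd/benchstat@latest old.txt new.txt
```
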
## Documentation
Each (non-internal, non-test) package must be documented using
diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile
index a7f6d8cc6..226410d74 100644
--- a/vendor/go.opentelemetry.io/otel/Makefile
+++ b/vendor/go.opentelemetry.io/otel/Makefile
@@ -11,6 +11,10 @@ ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {}
GO = go
TIMEOUT = 60
+# User to run as in docker images.
+DOCKER_USER=$(shell id -u):$(shell id -g)
+DEPENDENCIES_DOCKERFILE=./dependencies.Dockerfile
+
.DEFAULT_GOAL := precommit
.PHONY: precommit ci
@@ -81,20 +85,20 @@ PIP := $(PYTOOLS)/pip
WORKDIR := /workdir
# The python image to use for the virtual environment.
-PYTHONIMAGE := python:3.11.3-slim-bullseye
+PYTHONIMAGE := $(shell awk '$$4=="python" {print $$2}' $(DEPENDENCIES_DOCKERFILE))
# Run the python image with the current directory mounted.
-DOCKERPY := docker run --rm -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE)
+DOCKERPY := docker run --rm -u $(DOCKER_USER) -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE)
# Create a virtual environment for Python tools.
$(PYTOOLS):
# The `--upgrade` flag is needed to ensure that the virtual environment is
# created with the latest pip version.
- @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade pip"
+ @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade --cache-dir=$(WORKDIR)/.cache/pip pip"
# Install python packages into the virtual environment.
$(PYTOOLS)/%: $(PYTOOLS)
- @$(DOCKERPY) $(PIP) install -r requirements.txt
+ @$(DOCKERPY) $(PIP) install --cache-dir=$(WORKDIR)/.cache/pip -r requirements.txt
CODESPELL = $(PYTOOLS)/codespell
$(CODESPELL): PACKAGE=codespell
@@ -119,7 +123,7 @@ vanity-import-fix: $(PORTO)
# Generate go.work file for local development.
.PHONY: go-work
go-work: $(CROSSLINK)
- $(CROSSLINK) work --root=$(shell pwd)
+ $(CROSSLINK) work --root=$(shell pwd) --go=1.22.7
# Build
@@ -265,13 +269,30 @@ check-clean-work-tree:
exit 1; \
fi
+# The weaver docker image to use for semconv-generate.
+WEAVER_IMAGE := $(shell awk '$$4=="weaver" {print $$2}' $(DEPENDENCIES_DOCKERFILE))
+
SEMCONVPKG ?= "semconv/"
.PHONY: semconv-generate
-semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT)
+semconv-generate: $(SEMCONVKIT)
[ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 )
- [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 )
- $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -z "$(SEMCONVPKG)/capitalizations.txt" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
- $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)"
+ # Ensure the target directory for source code is available.
+ mkdir -p $(PWD)/$(SEMCONVPKG)/${TAG}
+ # Note: We mount a home directory for downloading/storing the semconv repository.
+ # Weaver will automatically clean the cache when finished, but the directories will remain.
+ mkdir -p ~/.weaver
+ docker run --rm \
+ -u $(DOCKER_USER) \
+ --env HOME=/tmp/weaver \
+ --mount 'type=bind,source=$(PWD)/semconv,target=/home/weaver/templates/registry/go,readonly' \
+ --mount 'type=bind,source=$(PWD)/semconv/${TAG},target=/home/weaver/target' \
+ --mount 'type=bind,source=$(HOME)/.weaver,target=/tmp/weaver/.weaver' \
+ $(WEAVER_IMAGE) registry generate \
+ --registry=https://github.com/open-telemetry/semantic-conventions/archive/refs/tags/$(TAG).zip[model] \
+ --templates=/home/weaver/templates \
+ --param tag=$(TAG) \
+ go \
+ /home/weaver/target
$(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)"
.PHONY: gorelease
diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md
index efec27890..8421cd7e5 100644
--- a/vendor/go.opentelemetry.io/otel/README.md
+++ b/vendor/go.opentelemetry.io/otel/README.md
@@ -1,9 +1,11 @@
# OpenTelemetry-Go
-[](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain)
+[](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml)
[](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main)
[](https://pkg.go.dev/go.opentelemetry.io/otel)
[](https://goreportcard.com/report/go.opentelemetry.io/otel)
+[](https://scorecard.dev/viewer/?uri=github.com/open-telemetry/opentelemetry-go)
+[](https://www.bestpractices.dev/projects/9996)
[](https://cloud-native.slack.com/archives/C01NPAXACKT)
OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/).
@@ -49,18 +51,25 @@ Currently, this project supports the following environments.
| OS | Go Version | Architecture |
|----------|------------|--------------|
+| Ubuntu | 1.24 | amd64 |
| Ubuntu | 1.23 | amd64 |
| Ubuntu | 1.22 | amd64 |
+| Ubuntu | 1.24 | 386 |
| Ubuntu | 1.23 | 386 |
| Ubuntu | 1.22 | 386 |
-| Linux | 1.23 | arm64 |
-| Linux | 1.22 | arm64 |
+| Ubuntu | 1.24 | arm64 |
+| Ubuntu | 1.23 | arm64 |
+| Ubuntu | 1.22 | arm64 |
+| macOS 13 | 1.24 | amd64 |
| macOS 13 | 1.23 | amd64 |
| macOS 13 | 1.22 | amd64 |
+| macOS | 1.24 | arm64 |
| macOS | 1.23 | arm64 |
| macOS | 1.22 | arm64 |
+| Windows | 1.24 | amd64 |
| Windows | 1.23 | amd64 |
| Windows | 1.22 | amd64 |
+| Windows | 1.24 | 386 |
| Windows | 1.23 | 386 |
| Windows | 1.22 | 386 |
diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md
index ffa9b6125..1e13ae54f 100644
--- a/vendor/go.opentelemetry.io/otel/RELEASING.md
+++ b/vendor/go.opentelemetry.io/otel/RELEASING.md
@@ -5,17 +5,14 @@
New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated.
The `semconv-generate` make target is used for this.
-1. Checkout a local copy of the [OpenTelemetry Semantic Conventions] to the desired release tag.
-2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest`
-3. Run the `make semconv-generate ...` target from this repository.
+1. Set the `TAG` environment variable to the semantic convention tag you want to generate.
+2. Run the `make semconv-generate ...` target from this repository.
For example,
```sh
-export TAG="v1.21.0" # Change to the release version you are generating.
-export OTEL_SEMCONV_REPO="/absolute/path/to/opentelemetry/semantic-conventions"
-docker pull otel/semconvgen:latest
-make semconv-generate # Uses the exported TAG and OTEL_SEMCONV_REPO.
+export TAG="v1.30.0" # Change to the release version you are generating.
+make semconv-generate # Uses the exported TAG.
```
This should create a new sub-package of [`semconv`](./semconv).
@@ -130,6 +127,6 @@ Importantly, bump any package versions referenced to be the latest one you just
Bump the dependencies in the following Go services:
-- [`accountingservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accountingservice)
-- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkoutservice)
-- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/productcatalogservice)
+- [`accounting`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accounting)
+- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkout)
+- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/product-catalog)
diff --git a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile
new file mode 100644
index 000000000..e4c4a753c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile
@@ -0,0 +1,3 @@
+# This is a renovate-friendly source of Docker images.
+FROM python:3.13.2-slim-bullseye@sha256:31b581c8218e1f3c58672481b3b7dba8e898852866b408c6a984c22832523935 AS python
+FROM otel/weaver:v0.13.2@sha256:ae7346b992e477f629ea327e0979e8a416a97f7956ab1f7e95ac1f44edf1a893 AS weaver
diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json
index 0a29a2f13..a6fa353f9 100644
--- a/vendor/go.opentelemetry.io/otel/renovate.json
+++ b/vendor/go.opentelemetry.io/otel/renovate.json
@@ -1,7 +1,7 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": [
- "config:recommended"
+ "config:best-practices"
],
"ignorePaths": [],
"labels": ["Skip Changelog", "dependencies"],
@@ -15,10 +15,8 @@
"enabled": true
},
{
- "matchFileNames": ["internal/tools/**"],
- "matchManagers": ["gomod"],
- "matchDepTypes": ["indirect"],
- "enabled": false
+ "matchPackageNames": ["go.opentelemetry.io/build-tools/**"],
+ "groupName": "build-tools"
},
{
"matchPackageNames": ["google.golang.org/genproto/googleapis/**"],
diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt
index ab09daf9d..1bb55fb1c 100644
--- a/vendor/go.opentelemetry.io/otel/requirements.txt
+++ b/vendor/go.opentelemetry.io/otel/requirements.txt
@@ -1 +1 @@
-codespell==2.3.0
+codespell==2.4.1
diff --git a/vendor/go.opentelemetry.io/otel/trace/auto.go b/vendor/go.opentelemetry.io/otel/trace/auto.go
new file mode 100644
index 000000000..7e2910025
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/auto.go
@@ -0,0 +1,661 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "math"
+ "os"
+ "reflect"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+ "unicode/utf8"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+ "go.opentelemetry.io/otel/trace/embedded"
+ "go.opentelemetry.io/otel/trace/internal/telemetry"
+)
+
+// newAutoTracerProvider returns an auto-instrumentable [trace.TracerProvider].
+// If an [go.opentelemetry.io/auto.Instrumentation] is configured to instrument
+// the process using the returned TracerProvider, all of the telemetry it
+// produces will be processed and handled by that Instrumentation. By default,
+// if no Instrumentation instruments the TracerProvider it will not generate
+// any trace telemetry.
+func newAutoTracerProvider() TracerProvider { return tracerProviderInstance }
+
+var tracerProviderInstance = new(autoTracerProvider)
+
+type autoTracerProvider struct{ embedded.TracerProvider }
+
+var _ TracerProvider = autoTracerProvider{}
+
+func (p autoTracerProvider) Tracer(name string, opts ...TracerOption) Tracer {
+ cfg := NewTracerConfig(opts...)
+ return autoTracer{
+ name: name,
+ version: cfg.InstrumentationVersion(),
+ schemaURL: cfg.SchemaURL(),
+ }
+}
+
+type autoTracer struct {
+ embedded.Tracer
+
+ name, schemaURL, version string
+}
+
+var _ Tracer = autoTracer{}
+
+func (t autoTracer) Start(ctx context.Context, name string, opts ...SpanStartOption) (context.Context, Span) {
+ var psc SpanContext
+ sampled := true
+ span := new(autoSpan)
+
+ // Ask eBPF for sampling decision and span context info.
+ t.start(ctx, span, &psc, &sampled, &span.spanContext)
+
+ span.sampled.Store(sampled)
+
+ ctx = ContextWithSpan(ctx, span)
+
+ if sampled {
+ // Only build traces if sampled.
+ cfg := NewSpanStartConfig(opts...)
+ span.traces, span.span = t.traces(name, cfg, span.spanContext, psc)
+ }
+
+ return ctx, span
+}
+
+// Expected to be implemented in eBPF.
+//
+//go:noinline
+func (t *autoTracer) start(
+ ctx context.Context,
+ spanPtr *autoSpan,
+ psc *SpanContext,
+ sampled *bool,
+ sc *SpanContext,
+) {
+ start(ctx, spanPtr, psc, sampled, sc)
+}
+
+// start is used for testing.
+var start = func(context.Context, *autoSpan, *SpanContext, *bool, *SpanContext) {}
+
+func (t autoTracer) traces(name string, cfg SpanConfig, sc, psc SpanContext) (*telemetry.Traces, *telemetry.Span) {
+ span := &telemetry.Span{
+ TraceID: telemetry.TraceID(sc.TraceID()),
+ SpanID: telemetry.SpanID(sc.SpanID()),
+ Flags: uint32(sc.TraceFlags()),
+ TraceState: sc.TraceState().String(),
+ ParentSpanID: telemetry.SpanID(psc.SpanID()),
+ Name: name,
+ Kind: spanKind(cfg.SpanKind()),
+ }
+
+ span.Attrs, span.DroppedAttrs = convCappedAttrs(maxSpan.Attrs, cfg.Attributes())
+
+ links := cfg.Links()
+ if limit := maxSpan.Links; limit == 0 {
+ n := int64(len(links))
+ if n > 0 {
+ span.DroppedLinks = uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked.
+ }
+ } else {
+ if limit > 0 {
+ n := int64(max(len(links)-limit, 0))
+ span.DroppedLinks = uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked.
+ links = links[n:]
+ }
+ span.Links = convLinks(links)
+ }
+
+ if t := cfg.Timestamp(); !t.IsZero() {
+ span.StartTime = cfg.Timestamp()
+ } else {
+ span.StartTime = time.Now()
+ }
+
+ return &telemetry.Traces{
+ ResourceSpans: []*telemetry.ResourceSpans{
+ {
+ ScopeSpans: []*telemetry.ScopeSpans{
+ {
+ Scope: &telemetry.Scope{
+ Name: t.name,
+ Version: t.version,
+ },
+ Spans: []*telemetry.Span{span},
+ SchemaURL: t.schemaURL,
+ },
+ },
+ },
+ },
+ }, span
+}
+
+func spanKind(kind SpanKind) telemetry.SpanKind {
+ switch kind {
+ case SpanKindInternal:
+ return telemetry.SpanKindInternal
+ case SpanKindServer:
+ return telemetry.SpanKindServer
+ case SpanKindClient:
+ return telemetry.SpanKindClient
+ case SpanKindProducer:
+ return telemetry.SpanKindProducer
+ case SpanKindConsumer:
+ return telemetry.SpanKindConsumer
+ }
+ return telemetry.SpanKind(0) // undefined.
+}
+
+type autoSpan struct {
+ embedded.Span
+
+ spanContext SpanContext
+ sampled atomic.Bool
+
+ mu sync.Mutex
+ traces *telemetry.Traces
+ span *telemetry.Span
+}
+
+func (s *autoSpan) SpanContext() SpanContext {
+ if s == nil {
+ return SpanContext{}
+ }
+ // s.spanContext is immutable, do not acquire lock s.mu.
+ return s.spanContext
+}
+
+func (s *autoSpan) IsRecording() bool {
+ if s == nil {
+ return false
+ }
+
+ return s.sampled.Load()
+}
+
+func (s *autoSpan) SetStatus(c codes.Code, msg string) {
+ if s == nil || !s.sampled.Load() {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if s.span.Status == nil {
+ s.span.Status = new(telemetry.Status)
+ }
+
+ s.span.Status.Message = msg
+
+ switch c {
+ case codes.Unset:
+ s.span.Status.Code = telemetry.StatusCodeUnset
+ case codes.Error:
+ s.span.Status.Code = telemetry.StatusCodeError
+ case codes.Ok:
+ s.span.Status.Code = telemetry.StatusCodeOK
+ }
+}
+
+func (s *autoSpan) SetAttributes(attrs ...attribute.KeyValue) {
+ if s == nil || !s.sampled.Load() {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ limit := maxSpan.Attrs
+ if limit == 0 {
+ // No attributes allowed.
+ n := int64(len(attrs))
+ if n > 0 {
+ s.span.DroppedAttrs += uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked.
+ }
+ return
+ }
+
+ m := make(map[string]int)
+ for i, a := range s.span.Attrs {
+ m[a.Key] = i
+ }
+
+ for _, a := range attrs {
+ val := convAttrValue(a.Value)
+ if val.Empty() {
+ s.span.DroppedAttrs++
+ continue
+ }
+
+ if idx, ok := m[string(a.Key)]; ok {
+ s.span.Attrs[idx] = telemetry.Attr{
+ Key: string(a.Key),
+ Value: val,
+ }
+ } else if limit < 0 || len(s.span.Attrs) < limit {
+ s.span.Attrs = append(s.span.Attrs, telemetry.Attr{
+ Key: string(a.Key),
+ Value: val,
+ })
+ m[string(a.Key)] = len(s.span.Attrs) - 1
+ } else {
+ s.span.DroppedAttrs++
+ }
+ }
+}
+
+// convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The
+// number of dropped attributes is also returned.
+func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) {
+ n := len(attrs)
+ if limit == 0 {
+ var out uint32
+ if n > 0 {
+ out = uint32(min(int64(n), math.MaxUint32)) // nolint: gosec // Bounds checked.
+ }
+ return nil, out
+ }
+
+ if limit < 0 {
+ // Unlimited.
+ return convAttrs(attrs), 0
+ }
+
+ if n < 0 {
+ n = 0
+ }
+
+ limit = min(n, limit)
+ return convAttrs(attrs[:limit]), uint32(n - limit) // nolint: gosec // Bounds checked.
+}
+
+func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr {
+ if len(attrs) == 0 {
+ // Avoid allocations if not necessary.
+ return nil
+ }
+
+ out := make([]telemetry.Attr, 0, len(attrs))
+ for _, attr := range attrs {
+ key := string(attr.Key)
+ val := convAttrValue(attr.Value)
+ if val.Empty() {
+ continue
+ }
+ out = append(out, telemetry.Attr{Key: key, Value: val})
+ }
+ return out
+}
+
+func convAttrValue(value attribute.Value) telemetry.Value {
+ switch value.Type() {
+ case attribute.BOOL:
+ return telemetry.BoolValue(value.AsBool())
+ case attribute.INT64:
+ return telemetry.Int64Value(value.AsInt64())
+ case attribute.FLOAT64:
+ return telemetry.Float64Value(value.AsFloat64())
+ case attribute.STRING:
+ v := truncate(maxSpan.AttrValueLen, value.AsString())
+ return telemetry.StringValue(v)
+ case attribute.BOOLSLICE:
+ slice := value.AsBoolSlice()
+ out := make([]telemetry.Value, 0, len(slice))
+ for _, v := range slice {
+ out = append(out, telemetry.BoolValue(v))
+ }
+ return telemetry.SliceValue(out...)
+ case attribute.INT64SLICE:
+ slice := value.AsInt64Slice()
+ out := make([]telemetry.Value, 0, len(slice))
+ for _, v := range slice {
+ out = append(out, telemetry.Int64Value(v))
+ }
+ return telemetry.SliceValue(out...)
+ case attribute.FLOAT64SLICE:
+ slice := value.AsFloat64Slice()
+ out := make([]telemetry.Value, 0, len(slice))
+ for _, v := range slice {
+ out = append(out, telemetry.Float64Value(v))
+ }
+ return telemetry.SliceValue(out...)
+ case attribute.STRINGSLICE:
+ slice := value.AsStringSlice()
+ out := make([]telemetry.Value, 0, len(slice))
+ for _, v := range slice {
+ v = truncate(maxSpan.AttrValueLen, v)
+ out = append(out, telemetry.StringValue(v))
+ }
+ return telemetry.SliceValue(out...)
+ }
+ return telemetry.Value{}
+}
+
+// truncate returns a truncated version of s such that it contains at most
+// the limit number of characters. Truncation is applied by returning the limit
+// number of valid characters contained in s.
+//
+// If limit is negative, it returns the original string.
+//
+// UTF-8 is supported. When truncating, all invalid characters are dropped
+// before applying truncation.
+//
+// If s already contains less than the limit number of bytes, it is returned
+// unchanged. No invalid characters are removed.
+func truncate(limit int, s string) string {
+ // This prioritizes performance in the following order based on the most
+ // common expected use-cases.
+ //
+ // - Short values less than the default limit (128).
+ // - Strings with valid encodings that exceed the limit.
+ // - No limit.
+ // - Strings with invalid encodings that exceed the limit.
+ if limit < 0 || len(s) <= limit {
+ return s
+ }
+
+ // Optimistically, assume all valid UTF-8.
+ var b strings.Builder
+ count := 0
+ for i, c := range s {
+ if c != utf8.RuneError {
+ count++
+ if count > limit {
+ return s[:i]
+ }
+ continue
+ }
+
+ _, size := utf8.DecodeRuneInString(s[i:])
+ if size == 1 {
+ // Invalid encoding.
+ b.Grow(len(s) - 1)
+ _, _ = b.WriteString(s[:i])
+ s = s[i:]
+ break
+ }
+ }
+
+ // Fast-path, no invalid input.
+ if b.Cap() == 0 {
+ return s
+ }
+
+ // Truncate while validating UTF-8.
+ for i := 0; i < len(s) && count < limit; {
+ c := s[i]
+ if c < utf8.RuneSelf {
+ // Optimization for single byte runes (common case).
+ _ = b.WriteByte(c)
+ i++
+ count++
+ continue
+ }
+
+ _, size := utf8.DecodeRuneInString(s[i:])
+ if size == 1 {
+ // We checked for all 1-byte runes above, this is a RuneError.
+ i++
+ continue
+ }
+
+ _, _ = b.WriteString(s[i : i+size])
+ i += size
+ count++
+ }
+
+ return b.String()
+}
+
+func (s *autoSpan) End(opts ...SpanEndOption) {
+ if s == nil || !s.sampled.Swap(false) {
+ return
+ }
+
+ // s.end exists so the lock (s.mu) is not held while s.ended is called.
+ s.ended(s.end(opts))
+}
+
+func (s *autoSpan) end(opts []SpanEndOption) []byte {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ cfg := NewSpanEndConfig(opts...)
+ if t := cfg.Timestamp(); !t.IsZero() {
+ s.span.EndTime = cfg.Timestamp()
+ } else {
+ s.span.EndTime = time.Now()
+ }
+
+ b, _ := json.Marshal(s.traces) // TODO: do not ignore this error.
+ return b
+}
+
+// Expected to be implemented in eBPF.
+//
+//go:noinline
+func (*autoSpan) ended(buf []byte) { ended(buf) }
+
+// ended is used for testing.
+var ended = func([]byte) {}
+
+func (s *autoSpan) RecordError(err error, opts ...EventOption) {
+ if s == nil || err == nil || !s.sampled.Load() {
+ return
+ }
+
+ cfg := NewEventConfig(opts...)
+
+ attrs := cfg.Attributes()
+ attrs = append(attrs,
+ semconv.ExceptionType(typeStr(err)),
+ semconv.ExceptionMessage(err.Error()),
+ )
+ if cfg.StackTrace() {
+ buf := make([]byte, 2048)
+ n := runtime.Stack(buf, false)
+ attrs = append(attrs, semconv.ExceptionStacktrace(string(buf[0:n])))
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.addEvent(semconv.ExceptionEventName, cfg.Timestamp(), attrs)
+}
+
+func typeStr(i any) string {
+ t := reflect.TypeOf(i)
+ if t.PkgPath() == "" && t.Name() == "" {
+ // Likely a builtin type.
+ return t.String()
+ }
+ return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
+}
+
+func (s *autoSpan) AddEvent(name string, opts ...EventOption) {
+ if s == nil || !s.sampled.Load() {
+ return
+ }
+
+ cfg := NewEventConfig(opts...)
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.addEvent(name, cfg.Timestamp(), cfg.Attributes())
+}
+
+// addEvent adds an event with name and attrs at tStamp to the span. The span
+// lock (s.mu) needs to be held by the caller.
+func (s *autoSpan) addEvent(name string, tStamp time.Time, attrs []attribute.KeyValue) {
+ limit := maxSpan.Events
+
+ if limit == 0 {
+ s.span.DroppedEvents++
+ return
+ }
+
+ if limit > 0 && len(s.span.Events) == limit {
+ // Drop head while avoiding allocation of more capacity.
+ copy(s.span.Events[:limit-1], s.span.Events[1:])
+ s.span.Events = s.span.Events[:limit-1]
+ s.span.DroppedEvents++
+ }
+
+ e := &telemetry.SpanEvent{Time: tStamp, Name: name}
+ e.Attrs, e.DroppedAttrs = convCappedAttrs(maxSpan.EventAttrs, attrs)
+
+ s.span.Events = append(s.span.Events, e)
+}
+
+func (s *autoSpan) AddLink(link Link) {
+ if s == nil || !s.sampled.Load() {
+ return
+ }
+
+ l := maxSpan.Links
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if l == 0 {
+ s.span.DroppedLinks++
+ return
+ }
+
+ if l > 0 && len(s.span.Links) == l {
+ // Drop head while avoiding allocation of more capacity.
+ copy(s.span.Links[:l-1], s.span.Links[1:])
+ s.span.Links = s.span.Links[:l-1]
+ s.span.DroppedLinks++
+ }
+
+ s.span.Links = append(s.span.Links, convLink(link))
+}
+
+func convLinks(links []Link) []*telemetry.SpanLink {
+ out := make([]*telemetry.SpanLink, 0, len(links))
+ for _, link := range links {
+ out = append(out, convLink(link))
+ }
+ return out
+}
+
+func convLink(link Link) *telemetry.SpanLink {
+ l := &telemetry.SpanLink{
+ TraceID: telemetry.TraceID(link.SpanContext.TraceID()),
+ SpanID: telemetry.SpanID(link.SpanContext.SpanID()),
+ TraceState: link.SpanContext.TraceState().String(),
+ Flags: uint32(link.SpanContext.TraceFlags()),
+ }
+ l.Attrs, l.DroppedAttrs = convCappedAttrs(maxSpan.LinkAttrs, link.Attributes)
+
+ return l
+}
+
+func (s *autoSpan) SetName(name string) {
+ if s == nil || !s.sampled.Load() {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.span.Name = name
+}
+
+func (*autoSpan) TracerProvider() TracerProvider { return newAutoTracerProvider() }
+
+// maxSpan are the span limits resolved during startup.
+var maxSpan = newSpanLimits()
+
+type spanLimits struct {
+ // Attrs is the number of allowed attributes for a span.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the
+ // environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT, or 128 if
+ // that is not set, is used.
+ Attrs int
+ // AttrValueLen is the maximum attribute value length allowed for a span.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the
+ // environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, or -1
+ // if that is not set, is used.
+ AttrValueLen int
+ // Events is the number of allowed events for a span.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_SPAN_EVENT_COUNT_LIMIT key, or 128 is used if that is not set.
+ Events int
+ // EventAttrs is the number of allowed attributes for a span event.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key, or 128 is used if that is not set.
+ EventAttrs int
+ // Links is the number of allowed Links for a span.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_SPAN_LINK_COUNT_LIMIT, or 128 is used if that is not set.
+ Links int
+ // LinkAttrs is the number of allowed attributes for a span link.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_LINK_ATTRIBUTE_COUNT_LIMIT, or 128 is used if that is not set.
+ LinkAttrs int
+}
+
+func newSpanLimits() spanLimits {
+ return spanLimits{
+ Attrs: firstEnv(
+ 128,
+ "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT",
+ "OTEL_ATTRIBUTE_COUNT_LIMIT",
+ ),
+ AttrValueLen: firstEnv(
+ -1, // Unlimited.
+ "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT",
+ "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT",
+ ),
+ Events: firstEnv(128, "OTEL_SPAN_EVENT_COUNT_LIMIT"),
+ EventAttrs: firstEnv(128, "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"),
+ Links: firstEnv(128, "OTEL_SPAN_LINK_COUNT_LIMIT"),
+ LinkAttrs: firstEnv(128, "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"),
+ }
+}
+
+// firstEnv returns the parsed integer value of the first matching environment
+// variable from keys. The defaultVal is returned if the value is not an
+// integer or no match is found.
+func firstEnv(defaultVal int, keys ...string) int {
+ for _, key := range keys {
+ strV := os.Getenv(key)
+ if strV == "" {
+ continue
+ }
+
+ v, err := strconv.Atoi(strV)
+ if err == nil {
+ return v
+ }
+ // Ignore invalid environment variable.
+ }
+
+ return defaultVal
+}
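
A small self-contained illustration (not part of the vendored file) of the precedence implemented by `firstEnv` above; the helper is re-declared locally so the snippet runs on its own:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// firstEnv mirrors the vendored helper: it returns the value of the first
// environment variable in keys that parses as an integer, otherwise defaultVal.
func firstEnv(defaultVal int, keys ...string) int {
	for _, key := range keys {
		if v, err := strconv.Atoi(os.Getenv(key)); err == nil {
			return v
		}
	}
	return defaultVal
}

func main() {
	os.Setenv("OTEL_ATTRIBUTE_COUNT_LIMIT", "32")
	os.Setenv("OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", "64")

	// The span-specific variable is listed first, so it wins: prints 64.
	fmt.Println(firstEnv(128, "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", "OTEL_ATTRIBUTE_COUNT_LIMIT"))

	// Unset (or non-integer) variables are skipped, falling back to the default: prints 128.
	fmt.Println(firstEnv(128, "OTEL_SPAN_EVENT_COUNT_LIMIT"))
}
```
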
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go
new file mode 100644
index 000000000..f663547b4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go
@@ -0,0 +1,58 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+// Attr is a key-value pair.
+type Attr struct {
+ Key string `json:"key,omitempty"`
+ Value Value `json:"value,omitempty"`
+}
+
+// String returns an Attr for a string value.
+func String(key, value string) Attr {
+ return Attr{key, StringValue(value)}
+}
+
+// Int64 returns an Attr for an int64 value.
+func Int64(key string, value int64) Attr {
+ return Attr{key, Int64Value(value)}
+}
+
+// Int returns an Attr for an int value.
+func Int(key string, value int) Attr {
+ return Int64(key, int64(value))
+}
+
+// Float64 returns an Attr for a float64 value.
+func Float64(key string, value float64) Attr {
+ return Attr{key, Float64Value(value)}
+}
+
+// Bool returns an Attr for a bool value.
+func Bool(key string, value bool) Attr {
+ return Attr{key, BoolValue(value)}
+}
+
+// Bytes returns an Attr for a []byte value.
+// The passed slice must not be changed after it is passed.
+func Bytes(key string, value []byte) Attr {
+ return Attr{key, BytesValue(value)}
+}
+
+// Slice returns an Attr for a []Value value.
+// The passed slice must not be changed after it is passed.
+func Slice(key string, value ...Value) Attr {
+ return Attr{key, SliceValue(value...)}
+}
+
+// Map returns an Attr for a map value.
+// The passed slice must not be changed after it is passed.
+func Map(key string, value ...Attr) Attr {
+ return Attr{key, MapValue(value...)}
+}
+
+// Equal returns if a is equal to b.
+func (a Attr) Equal(b Attr) bool {
+ return a.Key == b.Key && a.Value.Equal(b.Value)
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go
new file mode 100644
index 000000000..5debe90bb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go
@@ -0,0 +1,8 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package telemetry provides lightweight representations of OpenTelemetry
+telemetry that are compatible with the OTLP JSON protobuf encoding.
+*/
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go
new file mode 100644
index 000000000..7b1ae3c4e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go
@@ -0,0 +1,103 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+ "encoding/hex"
+ "errors"
+ "fmt"
+)
+
+const (
+ traceIDSize = 16
+ spanIDSize = 8
+)
+
+// TraceID is a custom data type that is used for all trace IDs.
+type TraceID [traceIDSize]byte
+
+// String returns the hex string representation form of a TraceID.
+func (tid TraceID) String() string {
+ return hex.EncodeToString(tid[:])
+}
+
+// IsEmpty returns false if id contains at least one non-zero byte.
+func (tid TraceID) IsEmpty() bool {
+ return tid == [traceIDSize]byte{}
+}
+
+// MarshalJSON converts the trace ID into a hex string enclosed in quotes.
+func (tid TraceID) MarshalJSON() ([]byte, error) {
+ if tid.IsEmpty() {
+ return []byte(`""`), nil
+ }
+ return marshalJSON(tid[:])
+}
+
+// UnmarshalJSON inflates the trace ID from hex string, possibly enclosed in
+// quotes.
+func (tid *TraceID) UnmarshalJSON(data []byte) error {
+ *tid = [traceIDSize]byte{}
+ return unmarshalJSON(tid[:], data)
+}
+
+// SpanID is a custom data type that is used for all span IDs.
+type SpanID [spanIDSize]byte
+
+// String returns the hex string representation form of a SpanID.
+func (sid SpanID) String() string {
+ return hex.EncodeToString(sid[:])
+}
+
+// IsEmpty returns false if the span ID contains at least one non-zero byte.
+func (sid SpanID) IsEmpty() bool {
+ return sid == [spanIDSize]byte{}
+}
+
+// MarshalJSON converts span ID into a hex string enclosed in quotes.
+func (sid SpanID) MarshalJSON() ([]byte, error) {
+ if sid.IsEmpty() {
+ return []byte(`""`), nil
+ }
+ return marshalJSON(sid[:])
+}
+
+// UnmarshalJSON decodes span ID from hex string, possibly enclosed in quotes.
+func (sid *SpanID) UnmarshalJSON(data []byte) error {
+ *sid = [spanIDSize]byte{}
+ return unmarshalJSON(sid[:], data)
+}
+
+// marshalJSON converts id into a hex string enclosed in quotes.
+func marshalJSON(id []byte) ([]byte, error) {
+ // Plus 2 quote chars at the start and end.
+ hexLen := hex.EncodedLen(len(id)) + 2
+
+ b := make([]byte, hexLen)
+ hex.Encode(b[1:hexLen-1], id)
+ b[0], b[hexLen-1] = '"', '"'
+
+ return b, nil
+}
+
+// unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes.
+func unmarshalJSON(dst []byte, src []byte) error {
+ if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' {
+ src = src[1 : l-1]
+ }
+ nLen := len(src)
+ if nLen == 0 {
+ return nil
+ }
+
+ if len(dst) != hex.DecodedLen(nLen) {
+ return errors.New("invalid length for ID")
+ }
+
+ _, err := hex.Decode(dst, src)
+ if err != nil {
+ return fmt.Errorf("cannot unmarshal ID from string '%s': %w", string(src), err)
+ }
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go
new file mode 100644
index 000000000..f5e3a8cec
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go
@@ -0,0 +1,67 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+ "encoding/json"
+ "strconv"
+)
+
+// protoInt64 represents the protobuf encoding of integers which can be either
+// strings or integers.
+type protoInt64 int64
+
+// Int64 returns the protoInt64 as an int64.
+func (i *protoInt64) Int64() int64 { return int64(*i) }
+
+// UnmarshalJSON decodes both strings and integers.
+func (i *protoInt64) UnmarshalJSON(data []byte) error {
+ if data[0] == '"' {
+ var str string
+ if err := json.Unmarshal(data, &str); err != nil {
+ return err
+ }
+ parsedInt, err := strconv.ParseInt(str, 10, 64)
+ if err != nil {
+ return err
+ }
+ *i = protoInt64(parsedInt)
+ } else {
+ var parsedInt int64
+ if err := json.Unmarshal(data, &parsedInt); err != nil {
+ return err
+ }
+ *i = protoInt64(parsedInt)
+ }
+ return nil
+}
+
+// protoUint64 represents the protobuf encoding of integers which can be either
+// strings or integers.
+type protoUint64 uint64
+
+// Uint64 returns the protoUint64 as a uint64.
+func (i *protoUint64) Uint64() uint64 { return uint64(*i) }
+
+// UnmarshalJSON decodes both strings and integers.
+func (i *protoUint64) UnmarshalJSON(data []byte) error {
+ if data[0] == '"' {
+ var str string
+ if err := json.Unmarshal(data, &str); err != nil {
+ return err
+ }
+ parsedUint, err := strconv.ParseUint(str, 10, 64)
+ if err != nil {
+ return err
+ }
+ *i = protoUint64(parsedUint)
+ } else {
+ var parsedUint uint64
+ if err := json.Unmarshal(data, &parsedUint); err != nil {
+ return err
+ }
+ *i = protoUint64(parsedUint)
+ }
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go
new file mode 100644
index 000000000..1798a702d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go
@@ -0,0 +1,66 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// Resource information.
+type Resource struct {
+ // Attrs are the set of attributes that describe the resource. Attribute
+ // keys MUST be unique (it is not allowed to have more than one attribute
+ // with the same key).
+ Attrs []Attr `json:"attributes,omitempty"`
+ // DroppedAttrs is the number of dropped attributes. If the value
+ // is 0, then no attributes were dropped.
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r.
+func (r *Resource) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid Resource type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid Resource field: %#v", keyIface)
+ }
+
+ switch key {
+ case "attributes":
+ err = decoder.Decode(&r.Attrs)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ err = decoder.Decode(&r.DroppedAttrs)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go
new file mode 100644
index 000000000..c2b4c635b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go
@@ -0,0 +1,67 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// Scope is the identifying values of the instrumentation scope.
+type Scope struct {
+ Name string `json:"name,omitempty"`
+ Version string `json:"version,omitempty"`
+ Attrs []Attr `json:"attributes,omitempty"`
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s.
+func (s *Scope) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid Scope type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid Scope field: %#v", keyIface)
+ }
+
+ switch key {
+ case "name":
+ err = decoder.Decode(&s.Name)
+ case "version":
+ err = decoder.Decode(&s.Version)
+ case "attributes":
+ err = decoder.Decode(&s.Attrs)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ err = decoder.Decode(&s.DroppedAttrs)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go
new file mode 100644
index 000000000..3c5e1cdb1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go
@@ -0,0 +1,460 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+ "bytes"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "time"
+)
+
+// A Span represents a single operation performed by a single component of the
+// system.
+type Span struct {
+ // A unique identifier for a trace. All spans from the same trace share
+ // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR
+ // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON
+ // is zero-length and thus is also invalid).
+ //
+ // This field is required.
+ TraceID TraceID `json:"traceId,omitempty"`
+ // A unique identifier for a span within a trace, assigned when the span
+ // is created. The ID is an 8-byte array. An ID with all zeroes OR of length
+ // other than 8 bytes is considered invalid (empty string in OTLP/JSON
+ // is zero-length and thus is also invalid).
+ //
+ // This field is required.
+ SpanID SpanID `json:"spanId,omitempty"`
+ // trace_state conveys information about request position in multiple distributed tracing graphs.
+ // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header
+ // See also https://github.com/w3c/distributed-tracing for more details about this field.
+ TraceState string `json:"traceState,omitempty"`
+ // The `span_id` of this span's parent span. If this is a root span, then this
+ // field must be empty. The ID is an 8-byte array.
+ ParentSpanID SpanID `json:"parentSpanId,omitempty"`
+ // Flags, a bit field.
+ //
+ // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace
+ // Context specification. To read the 8-bit W3C trace flag, use
+ // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
+ //
+ // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+ //
+ // Bits 8 and 9 represent the 3 states of whether a span's parent
+ // is remote. The states are (unknown, is not remote, is remote).
+ // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`.
+ // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`.
+ //
+ // When creating span messages, if the message is logically forwarded from another source
+ // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD
+ // be copied as-is. If creating from a source that does not have an equivalent flags field
+ // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST
+ // be set to zero.
+ // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero.
+ //
+ // [Optional].
+ Flags uint32 `json:"flags,omitempty"`
+ // A description of the span's operation.
+ //
+ // For example, the name can be a qualified method name or a file name
+ // and a line number where the operation is called. A best practice is to use
+ // the same display name at the same call point in an application.
+ // This makes it easier to correlate spans in different traces.
+ //
+ // This field is semantically required to be set to non-empty string.
+ // Empty value is equivalent to an unknown span name.
+ //
+ // This field is required.
+ Name string `json:"name"`
+ // Distinguishes between spans generated in a particular context. For example,
+ // two spans with the same name may be distinguished using `CLIENT` (caller)
+ // and `SERVER` (callee) to identify queueing latency associated with the span.
+ Kind SpanKind `json:"kind,omitempty"`
+ // start_time_unix_nano is the start time of the span. On the client side, this is the time
+ // kept by the local machine where the span execution starts. On the server side, this
+ // is the time when the server's application handler starts running.
+ // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+ //
+ // This field is semantically required and it is expected that end_time >= start_time.
+ StartTime time.Time `json:"startTimeUnixNano,omitempty"`
+ // end_time_unix_nano is the end time of the span. On the client side, this is the time
+ // kept by the local machine where the span execution ends. On the server side, this
+ // is the time when the server application handler stops running.
+ // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+ //
+ // This field is semantically required and it is expected that end_time >= start_time.
+ EndTime time.Time `json:"endTimeUnixNano,omitempty"`
+ // attributes is a collection of key/value pairs. Note, global attributes
+ // like server name can be set using the resource API. Examples of attributes:
+ //
+ // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
+ // "/http/server_latency": 300
+ // "example.com/myattribute": true
+ // "example.com/score": 10.239
+ //
+ // The OpenTelemetry API specification further restricts the allowed value types:
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
+ Attrs []Attr `json:"attributes,omitempty"`
+ // dropped_attributes_count is the number of attributes that were discarded. Attributes
+ // can be discarded because their keys are too long or because there are too many
+ // attributes. If this value is 0, then no attributes were dropped.
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+ // events is a collection of Event items.
+ Events []*SpanEvent `json:"events,omitempty"`
+ // dropped_events_count is the number of dropped events. If the value is 0, then no
+ // events were dropped.
+ DroppedEvents uint32 `json:"droppedEventsCount,omitempty"`
+ // links is a collection of Links, which are references from this span to a span
+ // in the same or different trace.
+ Links []*SpanLink `json:"links,omitempty"`
+ // dropped_links_count is the number of dropped links after the maximum size was
+ // enforced. If this value is 0, then no links were dropped.
+ DroppedLinks uint32 `json:"droppedLinksCount,omitempty"`
+ // An optional final status for this span. Semantically when Status isn't set, it means
+ // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0).
+ Status *Status `json:"status,omitempty"`
+}
+
+// MarshalJSON encodes s into OTLP formatted JSON.
+func (s Span) MarshalJSON() ([]byte, error) {
+ startT := s.StartTime.UnixNano()
+ if s.StartTime.IsZero() || startT < 0 {
+ startT = 0
+ }
+
+ endT := s.EndTime.UnixNano()
+ if s.EndTime.IsZero() || endT < 0 {
+ endT = 0
+ }
+
+ // Override non-empty default SpanID marshal and omitempty.
+ var parentSpanId string
+ if !s.ParentSpanID.IsEmpty() {
+ b := make([]byte, hex.EncodedLen(spanIDSize))
+ hex.Encode(b, s.ParentSpanID[:])
+ parentSpanId = string(b)
+ }
+
+ type Alias Span
+ return json.Marshal(struct {
+ Alias
+ ParentSpanID string `json:"parentSpanId,omitempty"`
+ StartTime uint64 `json:"startTimeUnixNano,omitempty"`
+ EndTime uint64 `json:"endTimeUnixNano,omitempty"`
+ }{
+ Alias: Alias(s),
+ ParentSpanID: parentSpanId,
+ StartTime: uint64(startT), // nolint:gosec // >0 checked above.
+ EndTime: uint64(endT), // nolint:gosec // >0 checked above.
+ })
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s.
+func (s *Span) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid Span type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid Span field: %#v", keyIface)
+ }
+
+ switch key {
+ case "traceId", "trace_id":
+ err = decoder.Decode(&s.TraceID)
+ case "spanId", "span_id":
+ err = decoder.Decode(&s.SpanID)
+ case "traceState", "trace_state":
+ err = decoder.Decode(&s.TraceState)
+ case "parentSpanId", "parent_span_id":
+ err = decoder.Decode(&s.ParentSpanID)
+ case "flags":
+ err = decoder.Decode(&s.Flags)
+ case "name":
+ err = decoder.Decode(&s.Name)
+ case "kind":
+ err = decoder.Decode(&s.Kind)
+ case "startTimeUnixNano", "start_time_unix_nano":
+ var val protoUint64
+ err = decoder.Decode(&val)
+ v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked.
+ s.StartTime = time.Unix(0, v)
+ case "endTimeUnixNano", "end_time_unix_nano":
+ var val protoUint64
+ err = decoder.Decode(&val)
+ v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked.
+ s.EndTime = time.Unix(0, v)
+ case "attributes":
+ err = decoder.Decode(&s.Attrs)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ err = decoder.Decode(&s.DroppedAttrs)
+ case "events":
+ err = decoder.Decode(&s.Events)
+ case "droppedEventsCount", "dropped_events_count":
+ err = decoder.Decode(&s.DroppedEvents)
+ case "links":
+ err = decoder.Decode(&s.Links)
+ case "droppedLinksCount", "dropped_links_count":
+ err = decoder.Decode(&s.DroppedLinks)
+ case "status":
+ err = decoder.Decode(&s.Status)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// SpanFlags represents constants used to interpret the
+// Span.flags field, which is protobuf 'fixed32' type and is to
+// be used as bit-fields. Each non-zero value defined in this enum is
+// a bit-mask. To extract the bit-field, for example, use an
+// expression like:
+//
+// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK)
+//
+// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+//
+// Note that Span flags were introduced in version 1.1 of the
+// OpenTelemetry protocol. Older Span producers do not set this
+// field, consequently consumers should not rely on the absence of a
+// particular flag bit to indicate the presence of a particular feature.
+type SpanFlags int32
+
+const (
+ // Bits 0-7 are used for trace flags.
+ SpanFlagsTraceFlagsMask SpanFlags = 255
+ // Bits 8 and 9 are used to indicate that the parent span or link span is remote.
+ // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known.
+ // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote.
+ SpanFlagsContextHasIsRemoteMask SpanFlags = 256
+ // SpanFlagsContextIsRemoteMask indicates the Span is remote.
+ SpanFlagsContextIsRemoteMask SpanFlags = 512
+)
+
+// SpanKind is the type of span. Can be used to specify additional relationships between spans
+// in addition to a parent/child relationship.
+type SpanKind int32
+
+const (
+ // Indicates that the span represents an internal operation within an application,
+ // as opposed to an operation happening at the boundaries. Default value.
+ SpanKindInternal SpanKind = 1
+ // Indicates that the span covers server-side handling of an RPC or other
+ // remote network request.
+ SpanKindServer SpanKind = 2
+ // Indicates that the span describes a request to some remote service.
+ SpanKindClient SpanKind = 3
+ // Indicates that the span describes a producer sending a message to a broker.
+ // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship
+ // between producer and consumer spans. A PRODUCER span ends when the message was accepted
+ // by the broker while the logical processing of the message might span a much longer time.
+ SpanKindProducer SpanKind = 4
+ // Indicates that the span describes consumer receiving a message from a broker.
+ // Like the PRODUCER kind, there is often no direct critical path latency relationship
+ // between producer and consumer spans.
+ SpanKindConsumer SpanKind = 5
+)
+
+// SpanEvent is a time-stamped annotation of the span, consisting of user-supplied
+// text description and key-value pairs.
+type SpanEvent struct {
+ // time_unix_nano is the time the event occurred.
+ Time time.Time `json:"timeUnixNano,omitempty"`
+ // name of the event.
+ // This field is semantically required to be set to non-empty string.
+ Name string `json:"name,omitempty"`
+ // attributes is a collection of attribute key/value pairs on the event.
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
+ Attrs []Attr `json:"attributes,omitempty"`
+ // dropped_attributes_count is the number of dropped attributes. If the value is 0,
+ // then no attributes were dropped.
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+}
+
+// MarshalJSON encodes e into OTLP formatted JSON.
+func (e SpanEvent) MarshalJSON() ([]byte, error) {
+ t := e.Time.UnixNano()
+ if e.Time.IsZero() || t < 0 {
+ t = 0
+ }
+
+ type Alias SpanEvent
+ return json.Marshal(struct {
+ Alias
+ Time uint64 `json:"timeUnixNano,omitempty"`
+ }{
+ Alias: Alias(e),
+ Time: uint64(t), // nolint: gosec // >0 checked above
+ })
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into se.
+func (se *SpanEvent) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid SpanEvent type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid SpanEvent field: %#v", keyIface)
+ }
+
+ switch key {
+ case "timeUnixNano", "time_unix_nano":
+ var val protoUint64
+ err = decoder.Decode(&val)
+ v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked.
+ se.Time = time.Unix(0, v)
+ case "name":
+ err = decoder.Decode(&se.Name)
+ case "attributes":
+ err = decoder.Decode(&se.Attrs)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ err = decoder.Decode(&se.DroppedAttrs)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// A pointer from the current span to another span in the same trace or in a
+// different trace. For example, this can be used in batching operations,
+// where a single batch handler processes multiple requests from different
+// traces or when the handler receives a request from a different project.
+type SpanLink struct {
+ // A unique identifier of a trace that this linked span is part of. The ID is a
+ // 16-byte array.
+ TraceID TraceID `json:"traceId,omitempty"`
+ // A unique identifier for the linked span. The ID is an 8-byte array.
+ SpanID SpanID `json:"spanId,omitempty"`
+ // The trace_state associated with the link.
+ TraceState string `json:"traceState,omitempty"`
+ // attributes is a collection of attribute key/value pairs on the link.
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
+ Attrs []Attr `json:"attributes,omitempty"`
+ // dropped_attributes_count is the number of dropped attributes. If the value is 0,
+ // then no attributes were dropped.
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+ // Flags, a bit field.
+ //
+ // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace
+ // Context specification. To read the 8-bit W3C trace flag, use
+ // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
+ //
+ // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+ //
+ // Bits 8 and 9 represent the 3 states of whether the link is remote.
+ // The states are (unknown, is not remote, is remote).
+ // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`.
+ // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`.
+ //
+ // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero.
+ // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero.
+ //
+ // [Optional].
+ Flags uint32 `json:"flags,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into sl.
+func (sl *SpanLink) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid SpanLink type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid SpanLink field: %#v", keyIface)
+ }
+
+ switch key {
+ case "traceId", "trace_id":
+ err = decoder.Decode(&sl.TraceID)
+ case "spanId", "span_id":
+ err = decoder.Decode(&sl.SpanID)
+ case "traceState", "trace_state":
+ err = decoder.Decode(&sl.TraceState)
+ case "attributes":
+ err = decoder.Decode(&sl.Attrs)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ err = decoder.Decode(&sl.DroppedAttrs)
+ case "flags":
+ err = decoder.Decode(&sl.Flags)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go
new file mode 100644
index 000000000..1d013a8fa
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go
@@ -0,0 +1,40 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+// For the semantics of status codes see
+// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status
+type StatusCode int32
+
+const (
+ // The default status.
+ StatusCodeUnset StatusCode = 0
+ // The Span has been validated by an Application developer or Operator to
+ // have completed successfully.
+ StatusCodeOK StatusCode = 1
+ // The Span contains an error.
+ StatusCodeError StatusCode = 2
+)
+
+var statusCodeStrings = []string{
+ "Unset",
+ "OK",
+ "Error",
+}
+
+func (s StatusCode) String() string {
+ if s >= 0 && int(s) < len(statusCodeStrings) {
+ return statusCodeStrings[s]
+ }
+ return ""
+}
+
+// The Status type defines a logical error model that is suitable for different
+// programming environments, including REST APIs and RPC APIs.
+type Status struct {
+ // A developer-facing human readable error message.
+ Message string `json:"message,omitempty"`
+ // The status code.
+ Code StatusCode `json:"code,omitempty"`
+}
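For reference, a small hedged example of the JSON these struct tags produce: Code is an int32-based type, so it encodes as a number, and omitempty drops zero values such as StatusCodeUnset. The sample uses a local copy of the types, not the internal package.

```go
package main

import (
	"encoding/json"
	"fmt"
)

type StatusCode int32

const (
	StatusCodeUnset StatusCode = 0
	StatusCodeOK    StatusCode = 1
	StatusCodeError StatusCode = 2
)

type Status struct {
	Message string     `json:"message,omitempty"`
	Code    StatusCode `json:"code,omitempty"`
}

func main() {
	b, _ := json.Marshal(Status{Message: "boom", Code: StatusCodeError})
	fmt.Println(string(b)) // {"message":"boom","code":2}

	b, _ = json.Marshal(Status{Code: StatusCodeUnset})
	fmt.Println(string(b)) // {}: zero values are omitted by omitempty
}
```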
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go
new file mode 100644
index 000000000..b03940708
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go
@@ -0,0 +1,189 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// Traces represents the traces data that can be stored in a persistent storage,
+// OR can be embedded by other protocols that transfer OTLP traces data but do
+// not implement the OTLP protocol.
+//
+// The main difference between this message and collector protocol is that
+// in this message there will not be any "control" or "metadata" specific to
+// OTLP protocol.
+//
+// When new fields are added into this message, the OTLP request MUST be updated
+// as well.
+type Traces struct {
+ // An array of ResourceSpans.
+ // For data coming from a single resource this array will typically contain
+ // one element. Intermediary nodes that receive data from multiple origins
+ // typically batch the data before forwarding further and in that case this
+ // array will contain multiple elements.
+ ResourceSpans []*ResourceSpans `json:"resourceSpans,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into td.
+func (td *Traces) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid TracesData type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid TracesData field: %#v", keyIface)
+ }
+
+ switch key {
+ case "resourceSpans", "resource_spans":
+ err = decoder.Decode(&td.ResourceSpans)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// A collection of ScopeSpans from a Resource.
+type ResourceSpans struct {
+ // The resource for the spans in this message.
+ // If this field is not set then no resource info is known.
+ Resource Resource `json:"resource"`
+ // A list of ScopeSpans that originate from a resource.
+ ScopeSpans []*ScopeSpans `json:"scopeSpans,omitempty"`
+ // This schema_url applies to the data in the "resource" field. It does not apply
+ // to the data in the "scope_spans" field which have their own schema_url field.
+ SchemaURL string `json:"schemaUrl,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into rs.
+func (rs *ResourceSpans) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid ResourceSpans type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid ResourceSpans field: %#v", keyIface)
+ }
+
+ switch key {
+ case "resource":
+ err = decoder.Decode(&rs.Resource)
+ case "scopeSpans", "scope_spans":
+ err = decoder.Decode(&rs.ScopeSpans)
+ case "schemaUrl", "schema_url":
+ err = decoder.Decode(&rs.SchemaURL)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// A collection of Spans produced by an InstrumentationScope.
+type ScopeSpans struct {
+ // The instrumentation scope information for the spans in this message.
+ // Semantically when InstrumentationScope isn't set, it is equivalent with
+ // an empty instrumentation scope name (unknown).
+ Scope *Scope `json:"scope"`
+ // A list of Spans that originate from an instrumentation scope.
+ Spans []*Span `json:"spans,omitempty"`
+ // The Schema URL, if known. This is the identifier of the Schema that the span data
+ // is recorded in. To learn more about Schema URL see
+ // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
+ // This schema_url applies to all spans and span events in the "spans" field.
+ SchemaURL string `json:"schemaUrl,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into ss.
+func (ss *ScopeSpans) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid ScopeSpans type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid ScopeSpans field: %#v", keyIface)
+ }
+
+ switch key {
+ case "scope":
+ err = decoder.Decode(&ss.Scope)
+ case "spans":
+ err = decoder.Decode(&ss.Spans)
+ case "schemaUrl", "schema_url":
+ err = decoder.Decode(&ss.SchemaURL)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
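The Traces → ResourceSpans → ScopeSpans nesting mirrors the OTLP data model. As a rough sketch, the payload below decodes with plain encoding/json into simplified local mirrors of those structs (field names taken from the JSON tags above); the real types additionally accept snake_case keys via their custom UnmarshalJSON methods.

```go
// Self-contained sketch; types here are simplified stand-ins, not the internal package.
package main

import (
	"encoding/json"
	"fmt"
)

type scopeSpans struct {
	SchemaURL string            `json:"schemaUrl"`
	Spans     []json.RawMessage `json:"spans"` // span bodies left opaque here
}

type resourceSpans struct {
	SchemaURL  string       `json:"schemaUrl"`
	ScopeSpans []scopeSpans `json:"scopeSpans"`
}

type traces struct {
	ResourceSpans []resourceSpans `json:"resourceSpans"`
}

func main() {
	payload := []byte(`{
		"resourceSpans": [{
			"scopeSpans": [{
				"spans": [{"name": "GET /items"}]
			}]
		}]
	}`)

	var td traces
	if err := json.Unmarshal(payload, &td); err != nil {
		panic(err)
	}
	fmt.Println(len(td.ResourceSpans), len(td.ResourceSpans[0].ScopeSpans)) // 1 1
}
```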
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go
new file mode 100644
index 000000000..7251492da
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go
@@ -0,0 +1,453 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+ "bytes"
+ "cmp"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "slices"
+ "strconv"
+ "unsafe"
+)
+
+// A Value represents a structured value.
+// A zero value is valid and represents an empty value.
+type Value struct {
+ // Ensure forward compatibility by explicitly making this not comparable.
+ noCmp [0]func() //nolint: unused // This is indeed used.
+
+ // num holds the value for Int64, Float64, and Bool. It holds the length
+ // for String, Bytes, Slice, Map.
+ num uint64
+ // any holds either the KindBool, KindInt64, KindFloat64, stringptr,
+ // bytesptr, sliceptr, or mapptr. If KindBool, KindInt64, or KindFloat64
+ // then the value of Value is in num as described above. Otherwise, it
+ // contains the value wrapped in the appropriate type.
+ any any
+}
+
+type (
+ // stringptr represents a value in Value.any for KindString Values.
+ stringptr *byte
+ // bytesptr represents a value in Value.any for KindBytes Values.
+ bytesptr *byte
+ // sliceptr represents a value in Value.any for KindSlice Values.
+ sliceptr *Value
+ // mapptr represents a value in Value.any for KindMap Values.
+ mapptr *Attr
+)
+
+// ValueKind is the kind of a [Value].
+type ValueKind int
+
+// ValueKind values.
+const (
+ ValueKindEmpty ValueKind = iota
+ ValueKindBool
+ ValueKindFloat64
+ ValueKindInt64
+ ValueKindString
+ ValueKindBytes
+ ValueKindSlice
+ ValueKindMap
+)
+
+var valueKindStrings = []string{
+ "Empty",
+ "Bool",
+ "Float64",
+ "Int64",
+ "String",
+ "Bytes",
+ "Slice",
+ "Map",
+}
+
+func (k ValueKind) String() string {
+ if k >= 0 && int(k) < len(valueKindStrings) {
+ return valueKindStrings[k]
+ }
+ return ""
+}
+
+// StringValue returns a new [Value] for a string.
+func StringValue(v string) Value {
+ return Value{
+ num: uint64(len(v)),
+ any: stringptr(unsafe.StringData(v)),
+ }
+}
+
+// IntValue returns a [Value] for an int.
+func IntValue(v int) Value { return Int64Value(int64(v)) }
+
+// Int64Value returns a [Value] for an int64.
+func Int64Value(v int64) Value {
+ return Value{
+ num: uint64(v), // nolint: gosec // Store raw bytes.
+ any: ValueKindInt64,
+ }
+}
+
+// Float64Value returns a [Value] for a float64.
+func Float64Value(v float64) Value {
+ return Value{num: math.Float64bits(v), any: ValueKindFloat64}
+}
+
+// BoolValue returns a [Value] for a bool.
+func BoolValue(v bool) Value { //nolint:revive // Not a control flag.
+ var n uint64
+ if v {
+ n = 1
+ }
+ return Value{num: n, any: ValueKindBool}
+}
+
+// BytesValue returns a [Value] for a byte slice. The passed slice must not be
+// changed after it is passed.
+func BytesValue(v []byte) Value {
+ return Value{
+ num: uint64(len(v)),
+ any: bytesptr(unsafe.SliceData(v)),
+ }
+}
+
+// SliceValue returns a [Value] for a slice of [Value]. The passed slice must
+// not be changed after it is passed.
+func SliceValue(vs ...Value) Value {
+ return Value{
+ num: uint64(len(vs)),
+ any: sliceptr(unsafe.SliceData(vs)),
+ }
+}
+
+// MapValue returns a new [Value] for a slice of key-value pairs. The passed
+// slice must not be changed after it is passed.
+func MapValue(kvs ...Attr) Value {
+ return Value{
+ num: uint64(len(kvs)),
+ any: mapptr(unsafe.SliceData(kvs)),
+ }
+}
+
+// AsString returns the value held by v as a string.
+func (v Value) AsString() string {
+ if sp, ok := v.any.(stringptr); ok {
+ return unsafe.String(sp, v.num)
+ }
+ // TODO: error handle
+ return ""
+}
+
+// asString returns the value held by v as a string. It will panic if the Value
+// is not KindString.
+func (v Value) asString() string {
+ return unsafe.String(v.any.(stringptr), v.num)
+}
+
+// AsInt64 returns the value held by v as an int64.
+func (v Value) AsInt64() int64 {
+ if v.Kind() != ValueKindInt64 {
+ // TODO: error handle
+ return 0
+ }
+ return v.asInt64()
+}
+
+// asInt64 returns the value held by v as an int64. If v is not of KindInt64,
+// this will return garbage.
+func (v Value) asInt64() int64 {
+ // Assumes v.num was a valid int64 (overflow not checked).
+ return int64(v.num) // nolint: gosec
+}
+
+// AsBool returns the value held by v as a bool.
+func (v Value) AsBool() bool {
+ if v.Kind() != ValueKindBool {
+ // TODO: error handle
+ return false
+ }
+ return v.asBool()
+}
+
+// asBool returns the value held by v as a bool. If v is not of KindBool, this
+// will return garbage.
+func (v Value) asBool() bool { return v.num == 1 }
+
+// AsFloat64 returns the value held by v as a float64.
+func (v Value) AsFloat64() float64 {
+ if v.Kind() != ValueKindFloat64 {
+ // TODO: error handle
+ return 0
+ }
+ return v.asFloat64()
+}
+
+// asFloat64 returns the value held by v as a float64. If v is not of
+// KindFloat64, this will return garbage.
+func (v Value) asFloat64() float64 { return math.Float64frombits(v.num) }
+
+// AsBytes returns the value held by v as a []byte.
+func (v Value) AsBytes() []byte {
+ if sp, ok := v.any.(bytesptr); ok {
+ return unsafe.Slice((*byte)(sp), v.num)
+ }
+ // TODO: error handle
+ return nil
+}
+
+// asBytes returns the value held by v as a []byte. It will panic if the Value
+// is not KindBytes.
+func (v Value) asBytes() []byte {
+ return unsafe.Slice((*byte)(v.any.(bytesptr)), v.num)
+}
+
+// AsSlice returns the value held by v as a []Value.
+func (v Value) AsSlice() []Value {
+ if sp, ok := v.any.(sliceptr); ok {
+ return unsafe.Slice((*Value)(sp), v.num)
+ }
+ // TODO: error handle
+ return nil
+}
+
+// asSlice returns the value held by v as a []Value. It will panic if the Value
+// is not KindSlice.
+func (v Value) asSlice() []Value {
+ return unsafe.Slice((*Value)(v.any.(sliceptr)), v.num)
+}
+
+// AsMap returns the value held by v as a []Attr.
+func (v Value) AsMap() []Attr {
+ if sp, ok := v.any.(mapptr); ok {
+ return unsafe.Slice((*Attr)(sp), v.num)
+ }
+ // TODO: error handle
+ return nil
+}
+
+// asMap returns the value held by v as a []Attr. It will panic if the
+// Value is not KindMap.
+func (v Value) asMap() []Attr {
+ return unsafe.Slice((*Attr)(v.any.(mapptr)), v.num)
+}
+
+// Kind returns the Kind of v.
+func (v Value) Kind() ValueKind {
+ switch x := v.any.(type) {
+ case ValueKind:
+ return x
+ case stringptr:
+ return ValueKindString
+ case bytesptr:
+ return ValueKindBytes
+ case sliceptr:
+ return ValueKindSlice
+ case mapptr:
+ return ValueKindMap
+ default:
+ return ValueKindEmpty
+ }
+}
+
+// Empty reports whether v does not hold any value.
+func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty }
+
+// Equal reports whether v is equal to w.
+func (v Value) Equal(w Value) bool {
+ k1 := v.Kind()
+ k2 := w.Kind()
+ if k1 != k2 {
+ return false
+ }
+ switch k1 {
+ case ValueKindInt64, ValueKindBool:
+ return v.num == w.num
+ case ValueKindString:
+ return v.asString() == w.asString()
+ case ValueKindFloat64:
+ return v.asFloat64() == w.asFloat64()
+ case ValueKindSlice:
+ return slices.EqualFunc(v.asSlice(), w.asSlice(), Value.Equal)
+ case ValueKindMap:
+ sv := sortMap(v.asMap())
+ sw := sortMap(w.asMap())
+ return slices.EqualFunc(sv, sw, Attr.Equal)
+ case ValueKindBytes:
+ return bytes.Equal(v.asBytes(), w.asBytes())
+ case ValueKindEmpty:
+ return true
+ default:
+ // TODO: error handle
+ return false
+ }
+}
+
+func sortMap(m []Attr) []Attr {
+ sm := make([]Attr, len(m))
+ copy(sm, m)
+ slices.SortFunc(sm, func(a, b Attr) int {
+ return cmp.Compare(a.Key, b.Key)
+ })
+
+ return sm
+}
+
+// String returns Value's value as a string, formatted like [fmt.Sprint].
+//
+// The returned string is meant for debugging;
+// the string representation is not stable.
+func (v Value) String() string {
+ switch v.Kind() {
+ case ValueKindString:
+ return v.asString()
+ case ValueKindInt64:
+ // Assumes v.num was a valid int64 (overflow not checked).
+ return strconv.FormatInt(int64(v.num), 10) // nolint: gosec
+ case ValueKindFloat64:
+ return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64)
+ case ValueKindBool:
+ return strconv.FormatBool(v.asBool())
+ case ValueKindBytes:
+ return fmt.Sprint(v.asBytes())
+ case ValueKindMap:
+ return fmt.Sprint(v.asMap())
+ case ValueKindSlice:
+ return fmt.Sprint(v.asSlice())
+ case ValueKindEmpty:
+ return ""
+ default:
+ // Try to handle this as gracefully as possible.
+ //
+ // Don't panic here. The goal here is to have developers find this
+ // first if a ValueKind is not handled. It is
+ // preferable to have users open an issue asking why their attributes
+ // have an "unhandled: " prefix than to have their code panic.
+ return fmt.Sprintf("<unhandled: %v>", v.Kind())
+ }
+}
+
+// MarshalJSON encodes v into OTLP formatted JSON.
+func (v *Value) MarshalJSON() ([]byte, error) {
+ switch v.Kind() {
+ case ValueKindString:
+ return json.Marshal(struct {
+ Value string `json:"stringValue"`
+ }{v.asString()})
+ case ValueKindInt64:
+ return json.Marshal(struct {
+ Value string `json:"intValue"`
+ }{strconv.FormatInt(int64(v.num), 10)}) // nolint: gosec // From raw bytes.
+ case ValueKindFloat64:
+ return json.Marshal(struct {
+ Value float64 `json:"doubleValue"`
+ }{v.asFloat64()})
+ case ValueKindBool:
+ return json.Marshal(struct {
+ Value bool `json:"boolValue"`
+ }{v.asBool()})
+ case ValueKindBytes:
+ return json.Marshal(struct {
+ Value []byte `json:"bytesValue"`
+ }{v.asBytes()})
+ case ValueKindMap:
+ return json.Marshal(struct {
+ Value struct {
+ Values []Attr `json:"values"`
+ } `json:"kvlistValue"`
+ }{struct {
+ Values []Attr `json:"values"`
+ }{v.asMap()}})
+ case ValueKindSlice:
+ return json.Marshal(struct {
+ Value struct {
+ Values []Value `json:"values"`
+ } `json:"arrayValue"`
+ }{struct {
+ Values []Value `json:"values"`
+ }{v.asSlice()}})
+ case ValueKindEmpty:
+ return nil, nil
+ default:
+ return nil, fmt.Errorf("unknown Value kind: %s", v.Kind().String())
+ }
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into v.
+func (v *Value) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid Value type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid Value key: %#v", keyIface)
+ }
+
+ switch key {
+ case "stringValue", "string_value":
+ var val string
+ err = decoder.Decode(&val)
+ *v = StringValue(val)
+ case "boolValue", "bool_value":
+ var val bool
+ err = decoder.Decode(&val)
+ *v = BoolValue(val)
+ case "intValue", "int_value":
+ var val protoInt64
+ err = decoder.Decode(&val)
+ *v = Int64Value(val.Int64())
+ case "doubleValue", "double_value":
+ var val float64
+ err = decoder.Decode(&val)
+ *v = Float64Value(val)
+ case "bytesValue", "bytes_value":
+ var val64 string
+ if err := decoder.Decode(&val64); err != nil {
+ return err
+ }
+ var val []byte
+ val, err = base64.StdEncoding.DecodeString(val64)
+ *v = BytesValue(val)
+ case "arrayValue", "array_value":
+ var val struct{ Values []Value }
+ err = decoder.Decode(&val)
+ *v = SliceValue(val.Values...)
+ case "kvlistValue", "kvlist_value":
+ var val struct{ Values []Attr }
+ err = decoder.Decode(&val)
+ *v = MapValue(val.Values...)
+ default:
+ // Skip unknown.
+ continue
+ }
+ // Use first valid. Ignore the rest.
+ return err
+ }
+
+ // Only unknown fields. Return nil without unmarshaling any value.
+ return nil
+}
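Value stores scalars in num and uses any as the kind discriminator or data pointer; on the wire, MarshalJSON emits the OTLP/JSON AnyValue wrappers, with 64-bit integers encoded as decimal strings. A hedged sketch of those wrapper shapes, reproduced with anonymous structs rather than the internal type:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

func main() {
	s, _ := json.Marshal(struct {
		Value string `json:"stringValue"`
	}{"hello"})
	fmt.Println(string(s)) // {"stringValue":"hello"}

	// OTLP/JSON encodes 64-bit integers as strings to avoid precision loss.
	i, _ := json.Marshal(struct {
		Value string `json:"intValue"`
	}{strconv.FormatInt(42, 10)})
	fmt.Println(string(i)) // {"intValue":"42"}

	b, _ := json.Marshal(struct {
		Value bool `json:"boolValue"`
	}{true})
	fmt.Println(string(b)) // {"boolValue":true}
}
```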
diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go
index ca20e9997..c8b1ae5d6 100644
--- a/vendor/go.opentelemetry.io/otel/trace/noop.go
+++ b/vendor/go.opentelemetry.io/otel/trace/noop.go
@@ -82,4 +82,22 @@ func (noopSpan) AddLink(Link) {}
func (noopSpan) SetName(string) {}
// TracerProvider returns a no-op TracerProvider.
-func (noopSpan) TracerProvider() TracerProvider { return noopTracerProvider{} }
+func (s noopSpan) TracerProvider() TracerProvider {
+ return s.tracerProvider(autoInstEnabled)
+}
+
+// autoInstEnabled defines if the auto-instrumentation SDK is enabled.
+//
+// The auto-instrumentation is expected to overwrite this value to true when it
+// attaches to the process.
+var autoInstEnabled = new(bool)
+
+// tracerProvider returns a noopTracerProvider if autoEnabled is false,
+// otherwise it will return a TracerProvider from the sdk package used in
+// auto-instrumentation.
+func (noopSpan) tracerProvider(autoEnabled *bool) TracerProvider {
+ if *autoEnabled {
+ return newAutoTracerProvider()
+ }
+ return noopTracerProvider{}
+}
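The no-op span now routes TracerProvider() through a package-level *bool that an auto-instrumentation agent is expected to flip at run time. A standalone sketch of that gate pattern (names here are illustrative, not the otel API):

```go
package main

import "fmt"

// Package-level pointer flag; external tooling can set *autoEnabled = true.
var autoEnabled = new(bool)

func provider() string {
	if *autoEnabled {
		return "auto-instrumentation provider"
	}
	return "no-op provider"
}

func main() {
	fmt.Println(provider()) // no-op provider
	*autoEnabled = true
	fmt.Println(provider()) // auto-instrumentation provider
}
```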
diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go
index fb7d12673..d5fa71f67 100644
--- a/vendor/go.opentelemetry.io/otel/version.go
+++ b/vendor/go.opentelemetry.io/otel/version.go
@@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel"
// Version is the current release version of OpenTelemetry in use.
func Version() string {
- return "1.33.0"
+ return "1.35.0"
}
diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml
index 9f878cd1f..2b4cb4b41 100644
--- a/vendor/go.opentelemetry.io/otel/versions.yaml
+++ b/vendor/go.opentelemetry.io/otel/versions.yaml
@@ -3,7 +3,7 @@
module-sets:
stable-v1:
- version: v1.33.0
+ version: v1.35.0
modules:
- go.opentelemetry.io/otel
- go.opentelemetry.io/otel/bridge/opencensus
@@ -23,11 +23,11 @@ module-sets:
- go.opentelemetry.io/otel/sdk/metric
- go.opentelemetry.io/otel/trace
experimental-metrics:
- version: v0.55.0
+ version: v0.57.0
modules:
- go.opentelemetry.io/otel/exporters/prometheus
experimental-logs:
- version: v0.9.0
+ version: v0.11.0
modules:
- go.opentelemetry.io/otel/log
- go.opentelemetry.io/otel/sdk/log
@@ -40,3 +40,4 @@ module-sets:
- go.opentelemetry.io/otel/schema
excluded-modules:
- go.opentelemetry.io/otel/internal/tools
+ - go.opentelemetry.io/otel/trace/internal/telemetry/test
diff --git a/vendor/go.yaml.in/yaml/v2/.travis.yml b/vendor/go.yaml.in/yaml/v2/.travis.yml
new file mode 100644
index 000000000..7348c50c0
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v2/.travis.yml
@@ -0,0 +1,17 @@
+language: go
+
+go:
+ - "1.4.x"
+ - "1.5.x"
+ - "1.6.x"
+ - "1.7.x"
+ - "1.8.x"
+ - "1.9.x"
+ - "1.10.x"
+ - "1.11.x"
+ - "1.12.x"
+ - "1.13.x"
+ - "1.14.x"
+ - "tip"
+
+go_import_path: gopkg.in/yaml.v2
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/LICENSE b/vendor/go.yaml.in/yaml/v2/LICENSE
similarity index 100%
rename from vendor/sigs.k8s.io/structured-merge-diff/v4/LICENSE
rename to vendor/go.yaml.in/yaml/v2/LICENSE
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml b/vendor/go.yaml.in/yaml/v2/LICENSE.libyaml
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml
rename to vendor/go.yaml.in/yaml/v2/LICENSE.libyaml
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE b/vendor/go.yaml.in/yaml/v2/NOTICE
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE
rename to vendor/go.yaml.in/yaml/v2/NOTICE
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md b/vendor/go.yaml.in/yaml/v2/README.md
similarity index 76%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/README.md
rename to vendor/go.yaml.in/yaml/v2/README.md
index 53f4139dc..c9388da42 100644
--- a/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md
+++ b/vendor/go.yaml.in/yaml/v2/README.md
@@ -1,13 +1,3 @@
-# go-yaml fork
-
-This package is a fork of the go-yaml library and is intended solely for consumption
-by kubernetes projects. In this fork, we plan to support only critical changes required for
-kubernetes, such as small bug fixes and regressions. Larger, general-purpose feature requests
-should be made in the upstream go-yaml library, and we will reject such changes in this fork
-unless we are pulling them from upstream.
-
-This fork is based on v2.4.0: https://github.com/go-yaml/yaml/releases/tag/v2.4.0
-
# YAML support for the Go language
Introduction
@@ -30,18 +20,16 @@ supported since they're a poor design and are gone in YAML 1.2.
Installation and usage
----------------------
-The import path for the package is *gopkg.in/yaml.v2*.
+The import path for the package is *go.yaml.in/yaml/v2*.
To install it, run:
- go get gopkg.in/yaml.v2
+ go get go.yaml.in/yaml/v2
API documentation
-----------------
-If opened in a browser, the import path itself leads to the API documentation:
-
- * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
+See: <https://pkg.go.dev/go.yaml.in/yaml/v2>
API stability
-------------
@@ -65,7 +53,7 @@ import (
"fmt"
"log"
- "gopkg.in/yaml.v2"
+ "go.yaml.in/yaml/v2"
)
var data = `
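For consumers of the upstream module, the rename is a one-line import change; the Marshal/Unmarshal API is unchanged. A hedged example (assuming go.yaml.in/yaml/v2 is present in the consumer's go.mod):

```go
package main

import (
	"fmt"

	"go.yaml.in/yaml/v2" // previously: "gopkg.in/yaml.v2"
)

func main() {
	out, err := yaml.Marshal(map[string]int{"replicas": 3})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // replicas: 3
}
```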
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go b/vendor/go.yaml.in/yaml/v2/apic.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go
rename to vendor/go.yaml.in/yaml/v2/apic.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go b/vendor/go.yaml.in/yaml/v2/decode.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go
rename to vendor/go.yaml.in/yaml/v2/decode.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go b/vendor/go.yaml.in/yaml/v2/emitterc.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go
rename to vendor/go.yaml.in/yaml/v2/emitterc.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go b/vendor/go.yaml.in/yaml/v2/encode.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go
rename to vendor/go.yaml.in/yaml/v2/encode.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go b/vendor/go.yaml.in/yaml/v2/parserc.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go
rename to vendor/go.yaml.in/yaml/v2/parserc.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go b/vendor/go.yaml.in/yaml/v2/readerc.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go
rename to vendor/go.yaml.in/yaml/v2/readerc.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go b/vendor/go.yaml.in/yaml/v2/resolve.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go
rename to vendor/go.yaml.in/yaml/v2/resolve.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go b/vendor/go.yaml.in/yaml/v2/scannerc.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go
rename to vendor/go.yaml.in/yaml/v2/scannerc.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go b/vendor/go.yaml.in/yaml/v2/sorter.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go
rename to vendor/go.yaml.in/yaml/v2/sorter.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go b/vendor/go.yaml.in/yaml/v2/writerc.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go
rename to vendor/go.yaml.in/yaml/v2/writerc.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go b/vendor/go.yaml.in/yaml/v2/yaml.go
similarity index 99%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go
rename to vendor/go.yaml.in/yaml/v2/yaml.go
index 30813884c..5248e1263 100644
--- a/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go
+++ b/vendor/go.yaml.in/yaml/v2/yaml.go
@@ -2,7 +2,7 @@
//
// Source code and other details for the project are available at GitHub:
//
-// https://github.com/go-yaml/yaml
+// https://github.com/yaml/go-yaml
//
package yaml
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go b/vendor/go.yaml.in/yaml/v2/yamlh.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go
rename to vendor/go.yaml.in/yaml/v2/yamlh.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go b/vendor/go.yaml.in/yaml/v2/yamlprivateh.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go
rename to vendor/go.yaml.in/yaml/v2/yamlprivateh.go
diff --git a/vendor/go.yaml.in/yaml/v3/LICENSE b/vendor/go.yaml.in/yaml/v3/LICENSE
new file mode 100644
index 000000000..2683e4bb1
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/LICENSE
@@ -0,0 +1,50 @@
+
+This project is covered by two different licenses: MIT and Apache.
+
+#### MIT License ####
+
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original MIT license, with the additional
+copyright staring in 2011 when the project was ported over:
+
+ apic.go emitterc.go parserc.go readerc.go scannerc.go
+ writerc.go yamlh.go yamlprivateh.go
+
+Copyright (c) 2006-2010 Kirill Simonov
+Copyright (c) 2006-2011 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+### Apache License ###
+
+All the remaining project files are covered by the Apache license:
+
+Copyright (c) 2011-2019 Canonical Ltd
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/doc.go b/vendor/go.yaml.in/yaml/v3/NOTICE
similarity index 76%
rename from vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/doc.go
rename to vendor/go.yaml.in/yaml/v3/NOTICE
index 16f443990..866d74a7a 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/doc.go
+++ b/vendor/go.yaml.in/yaml/v3/NOTICE
@@ -1,5 +1,4 @@
-/*
-Copyright The Kubernetes Authors.
+Copyright 2011-2016 Canonical Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -12,9 +11,3 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-// Package fake has the automatically generated clients.
-package fake
diff --git a/vendor/go.yaml.in/yaml/v3/README.md b/vendor/go.yaml.in/yaml/v3/README.md
new file mode 100644
index 000000000..15a85a635
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/README.md
@@ -0,0 +1,171 @@
+go.yaml.in/yaml
+===============
+
+YAML Support for the Go Language
+
+
+## Introduction
+
+The `yaml` package enables [Go](https://go.dev/) programs to comfortably encode
+and decode [YAML](https://yaml.org/) values.
+
+It was originally developed within [Canonical](https://www.canonical.com) as
+part of the [juju](https://juju.ubuntu.com) project, and is based on a pure Go
+port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) C library to
+parse and generate YAML data quickly and reliably.
+
+
+## Project Status
+
+This project started as a fork of the extremely popular [go-yaml](
+https://github.com/go-yaml/yaml/)
+project, and is being maintained by the official [YAML organization](
+https://github.com/yaml/).
+
+The YAML team took over ongoing maintenance and development of the project after
+discussion with go-yaml's author, @niemeyer, following his decision to
+[label the project repository as "unmaintained"](
+https://github.com/go-yaml/yaml/blob/944c86a7d2/README.md) in April 2025.
+
+We have put together a team of dedicated maintainers including representatives
+of go-yaml's most important downstream projects.
+
+We will strive to earn the trust of the various go-yaml forks to switch back to
+this repository as their upstream.
+
+Please [contact us](https://cloud-native.slack.com/archives/C08PPAT8PS7) if you
+would like to contribute or be involved.
+
+
+## Compatibility
+
+The `yaml` package supports most of YAML 1.2, but preserves some behavior from
+1.1 for backwards compatibility.
+
+Specifically, v3 of the `yaml` package:
+
+* Supports YAML 1.1 bools (`yes`/`no`, `on`/`off`) as long as they are being
+ decoded into a typed bool value.
+ Otherwise they behave as a string.
+ Booleans in YAML 1.2 are `true`/`false` only.
+* Supports octals encoded and decoded as `0777` per YAML 1.1, rather than
+ `0o777` as specified in YAML 1.2, because most parsers still use the old
+ format.
+ Octals in the `0o777` format are supported though, so new files work.
+* Does not support base-60 floats.
+ These are gone from YAML 1.2, and were actually never supported by this
+ package as it's clearly a poor choice.
+
+
+## Installation and Usage
+
+The import path for the package is *go.yaml.in/yaml/v3*.
+
+To install it, run:
+
+```bash
+go get go.yaml.in/yaml/v3
+```
+
+
+## API Documentation
+
+See:
+
+
+## API Stability
+
+The package API for yaml v3 will remain stable as described in [gopkg.in](
+https://gopkg.in).
+
+
+## Example
+
+```go
+package main
+
+import (
+ "fmt"
+ "log"
+
+ "go.yaml.in/yaml/v3"
+)
+
+var data = `
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+`
+
+// Note: struct fields must be public in order for unmarshal to
+// correctly populate the data.
+type T struct {
+ A string
+ B struct {
+ RenamedC int `yaml:"c"`
+ D []int `yaml:",flow"`
+ }
+}
+
+func main() {
+ t := T{}
+
+ err := yaml.Unmarshal([]byte(data), &t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t:\n%v\n\n", t)
+
+ d, err := yaml.Marshal(&t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t dump:\n%s\n\n", string(d))
+
+ m := make(map[interface{}]interface{})
+
+ err = yaml.Unmarshal([]byte(data), &m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m:\n%v\n\n", m)
+
+ d, err = yaml.Marshal(&m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m dump:\n%s\n\n", string(d))
+}
+```
+
+This example will generate the following output:
+
+```
+--- t:
+{Easy! {2 [3 4]}}
+
+--- t dump:
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+
+
+--- m:
+map[a:Easy! b:map[c:2 d:[3 4]]]
+
+--- m dump:
+a: Easy!
+b:
+ c: 2
+ d:
+ - 3
+ - 4
+```
+
+
+## License
+
+The yaml package is licensed under the MIT and Apache License 2.0 licenses.
+Please see the LICENSE file for details.
diff --git a/vendor/go.yaml.in/yaml/v3/apic.go b/vendor/go.yaml.in/yaml/v3/apic.go
new file mode 100644
index 000000000..05fd305da
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/apic.go
@@ -0,0 +1,747 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "io"
+)
+
+func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
+ //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
+
+ // Check if we can move the queue at the beginning of the buffer.
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
+ if parser.tokens_head != len(parser.tokens) {
+ copy(parser.tokens, parser.tokens[parser.tokens_head:])
+ }
+ parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
+ parser.tokens_head = 0
+ }
+ parser.tokens = append(parser.tokens, *token)
+ if pos < 0 {
+ return
+ }
+ copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
+ parser.tokens[parser.tokens_head+pos] = *token
+}
+
+// Create a new parser object.
+func yaml_parser_initialize(parser *yaml_parser_t) bool {
+ *parser = yaml_parser_t{
+ raw_buffer: make([]byte, 0, input_raw_buffer_size),
+ buffer: make([]byte, 0, input_buffer_size),
+ }
+ return true
+}
+
+// Destroy a parser object.
+func yaml_parser_delete(parser *yaml_parser_t) {
+ *parser = yaml_parser_t{}
+}
+
+// String read handler.
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ if parser.input_pos == len(parser.input) {
+ return 0, io.EOF
+ }
+ n = copy(buffer, parser.input[parser.input_pos:])
+ parser.input_pos += n
+ return n, nil
+}
+
+// Reader read handler.
+func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ return parser.input_reader.Read(buffer)
+}
+
+// Set a string input.
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_string_read_handler
+ parser.input = input
+ parser.input_pos = 0
+}
+
+// Set a file input.
+func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_reader_read_handler
+ parser.input_reader = r
+}
+
+// Set the source encoding.
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
+ if parser.encoding != yaml_ANY_ENCODING {
+ panic("must set the encoding only once")
+ }
+ parser.encoding = encoding
+}
+
+// Create a new emitter object.
+func yaml_emitter_initialize(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{
+ buffer: make([]byte, output_buffer_size),
+ raw_buffer: make([]byte, 0, output_raw_buffer_size),
+ states: make([]yaml_emitter_state_t, 0, initial_stack_size),
+ events: make([]yaml_event_t, 0, initial_queue_size),
+ best_width: -1,
+ }
+}
+
+// Destroy an emitter object.
+func yaml_emitter_delete(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{}
+}
+
+// String write handler.
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+ return nil
+}
+
+// yaml_writer_write_handler uses emitter.output_writer to write the
+// emitted text.
+func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ _, err := emitter.output_writer.Write(buffer)
+ return err
+}
+
+// Set a string output.
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_string_write_handler
+ emitter.output_buffer = output_buffer
+}
+
+// Set a file output.
+func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_writer_write_handler
+ emitter.output_writer = w
+}
+
+// Set the output encoding.
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
+ if emitter.encoding != yaml_ANY_ENCODING {
+ panic("must set the output encoding only once")
+ }
+ emitter.encoding = encoding
+}
+
+// Set the canonical output style.
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
+ emitter.canonical = canonical
+}
+
+// Set the indentation increment.
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
+ if indent < 2 || indent > 9 {
+ indent = 2
+ }
+ emitter.best_indent = indent
+}
+
+// Set the preferred line width.
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
+ if width < 0 {
+ width = -1
+ }
+ emitter.best_width = width
+}
+
+// Set if unescaped non-ASCII characters are allowed.
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
+ emitter.unicode = unicode
+}
+
+// Set the preferred line break character.
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
+ emitter.line_break = line_break
+}
+
+///*
+// * Destroy a token object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_token_delete(yaml_token_t *token)
+//{
+// assert(token); // Non-NULL token object expected.
+//
+// switch (token.type)
+// {
+// case YAML_TAG_DIRECTIVE_TOKEN:
+// yaml_free(token.data.tag_directive.handle);
+// yaml_free(token.data.tag_directive.prefix);
+// break;
+//
+// case YAML_ALIAS_TOKEN:
+// yaml_free(token.data.alias.value);
+// break;
+//
+// case YAML_ANCHOR_TOKEN:
+// yaml_free(token.data.anchor.value);
+// break;
+//
+// case YAML_TAG_TOKEN:
+// yaml_free(token.data.tag.handle);
+// yaml_free(token.data.tag.suffix);
+// break;
+//
+// case YAML_SCALAR_TOKEN:
+// yaml_free(token.data.scalar.value);
+// break;
+//
+// default:
+// break;
+// }
+//
+// memset(token, 0, sizeof(yaml_token_t));
+//}
+//
+///*
+// * Check if a string is a valid UTF-8 sequence.
+// *
+// * Check 'reader.c' for more details on UTF-8 encoding.
+// */
+//
+//static int
+//yaml_check_utf8(yaml_char_t *start, size_t length)
+//{
+// yaml_char_t *end = start+length;
+// yaml_char_t *pointer = start;
+//
+// while (pointer < end) {
+// unsigned char octet;
+// unsigned int width;
+// unsigned int value;
+// size_t k;
+//
+// octet = pointer[0];
+// width = (octet & 0x80) == 0x00 ? 1 :
+// (octet & 0xE0) == 0xC0 ? 2 :
+// (octet & 0xF0) == 0xE0 ? 3 :
+// (octet & 0xF8) == 0xF0 ? 4 : 0;
+// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
+// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
+// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
+// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
+// if (!width) return 0;
+// if (pointer+width > end) return 0;
+// for (k = 1; k < width; k ++) {
+// octet = pointer[k];
+// if ((octet & 0xC0) != 0x80) return 0;
+// value = (value << 6) + (octet & 0x3F);
+// }
+// if (!((width == 1) ||
+// (width == 2 && value >= 0x80) ||
+// (width == 3 && value >= 0x800) ||
+// (width == 4 && value >= 0x10000))) return 0;
+//
+// pointer += width;
+// }
+//
+// return 1;
+//}
+//
+
+// Create STREAM-START.
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ encoding: encoding,
+ }
+}
+
+// Create STREAM-END.
+func yaml_stream_end_event_initialize(event *yaml_event_t) {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ }
+}
+
+// Create DOCUMENT-START.
+func yaml_document_start_event_initialize(
+ event *yaml_event_t,
+ version_directive *yaml_version_directive_t,
+ tag_directives []yaml_tag_directive_t,
+ implicit bool,
+) {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: implicit,
+ }
+}
+
+// Create DOCUMENT-END.
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ implicit: implicit,
+ }
+}
+
+// Create ALIAS.
+func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool {
+ *event = yaml_event_t{
+ typ: yaml_ALIAS_EVENT,
+ anchor: anchor,
+ }
+ return true
+}
+
+// Create SCALAR.
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ anchor: anchor,
+ tag: tag,
+ value: value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-START.
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-END.
+func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ }
+ return true
+}
+
+// Create MAPPING-START.
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+}
+
+// Create MAPPING-END.
+func yaml_mapping_end_event_initialize(event *yaml_event_t) {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ }
+}
+
+// Destroy an event object.
+func yaml_event_delete(event *yaml_event_t) {
+ *event = yaml_event_t{}
+}
+
+///*
+// * Create a document object.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_initialize(document *yaml_document_t,
+// version_directive *yaml_version_directive_t,
+// tag_directives_start *yaml_tag_directive_t,
+// tag_directives_end *yaml_tag_directive_t,
+// start_implicit int, end_implicit int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// struct {
+// start *yaml_node_t
+// end *yaml_node_t
+// top *yaml_node_t
+// } nodes = { NULL, NULL, NULL }
+// version_directive_copy *yaml_version_directive_t = NULL
+// struct {
+// start *yaml_tag_directive_t
+// end *yaml_tag_directive_t
+// top *yaml_tag_directive_t
+// } tag_directives_copy = { NULL, NULL, NULL }
+// value yaml_tag_directive_t = { NULL, NULL }
+// mark yaml_mark_t = { 0, 0, 0 }
+//
+// assert(document) // Non-NULL document object is expected.
+// assert((tag_directives_start && tag_directives_end) ||
+// (tag_directives_start == tag_directives_end))
+// // Valid tag directives are expected.
+//
+// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
+//
+// if (version_directive) {
+// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
+// if (!version_directive_copy) goto error
+// version_directive_copy.major = version_directive.major
+// version_directive_copy.minor = version_directive.minor
+// }
+//
+// if (tag_directives_start != tag_directives_end) {
+// tag_directive *yaml_tag_directive_t
+// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
+// goto error
+// for (tag_directive = tag_directives_start
+// tag_directive != tag_directives_end; tag_directive ++) {
+// assert(tag_directive.handle)
+// assert(tag_directive.prefix)
+// if (!yaml_check_utf8(tag_directive.handle,
+// strlen((char *)tag_directive.handle)))
+// goto error
+// if (!yaml_check_utf8(tag_directive.prefix,
+// strlen((char *)tag_directive.prefix)))
+// goto error
+// value.handle = yaml_strdup(tag_directive.handle)
+// value.prefix = yaml_strdup(tag_directive.prefix)
+// if (!value.handle || !value.prefix) goto error
+// if (!PUSH(&context, tag_directives_copy, value))
+// goto error
+// value.handle = NULL
+// value.prefix = NULL
+// }
+// }
+//
+// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
+// tag_directives_copy.start, tag_directives_copy.top,
+// start_implicit, end_implicit, mark, mark)
+//
+// return 1
+//
+//error:
+// STACK_DEL(&context, nodes)
+// yaml_free(version_directive_copy)
+// while (!STACK_EMPTY(&context, tag_directives_copy)) {
+// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+// }
+// STACK_DEL(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+//
+// return 0
+//}
+//
+///*
+// * Destroy a document object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_document_delete(document *yaml_document_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// tag_directive *yaml_tag_directive_t
+//
+// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// while (!STACK_EMPTY(&context, document.nodes)) {
+// node yaml_node_t = POP(&context, document.nodes)
+// yaml_free(node.tag)
+// switch (node.type) {
+// case YAML_SCALAR_NODE:
+// yaml_free(node.data.scalar.value)
+// break
+// case YAML_SEQUENCE_NODE:
+// STACK_DEL(&context, node.data.sequence.items)
+// break
+// case YAML_MAPPING_NODE:
+// STACK_DEL(&context, node.data.mapping.pairs)
+// break
+// default:
+// assert(0) // Should not happen.
+// }
+// }
+// STACK_DEL(&context, document.nodes)
+//
+// yaml_free(document.version_directive)
+// for (tag_directive = document.tag_directives.start
+// tag_directive != document.tag_directives.end
+// tag_directive++) {
+// yaml_free(tag_directive.handle)
+// yaml_free(tag_directive.prefix)
+// }
+// yaml_free(document.tag_directives.start)
+//
+// memset(document, 0, sizeof(yaml_document_t))
+//}
+//
+///**
+// * Get a document node.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_node(document *yaml_document_t, index int)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
+// return document.nodes.start + index - 1
+// }
+// return NULL
+//}
+//
+///**
+// * Get the root object.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_root_node(document *yaml_document_t)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (document.nodes.top != document.nodes.start) {
+// return document.nodes.start
+// }
+// return NULL
+//}
+//
+///*
+// * Add a scalar node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_scalar(document *yaml_document_t,
+// tag *yaml_char_t, value *yaml_char_t, length int,
+// style yaml_scalar_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// value_copy *yaml_char_t = NULL
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+// assert(value) // Non-NULL value is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (length < 0) {
+// length = strlen((char *)value)
+// }
+//
+// if (!yaml_check_utf8(value, length)) goto error
+// value_copy = yaml_malloc(length+1)
+// if (!value_copy) goto error
+// memcpy(value_copy, value, length)
+// value_copy[length] = '\0'
+//
+// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// yaml_free(tag_copy)
+// yaml_free(value_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a sequence node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_sequence(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_sequence_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_item_t
+// end *yaml_node_item_t
+// top *yaml_node_item_t
+// } items = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
+//
+// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, items)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a mapping node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_mapping(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_mapping_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_pair_t
+// end *yaml_node_pair_t
+// top *yaml_node_pair_t
+// } pairs = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
+//
+// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, pairs)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Append an item to a sequence node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_sequence_item(document *yaml_document_t,
+// sequence int, item int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// assert(document) // Non-NULL document is required.
+// assert(sequence > 0
+// && document.nodes.start + sequence <= document.nodes.top)
+// // Valid sequence id is required.
+// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
+// // A sequence node is required.
+// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
+// // Valid item id is required.
+//
+// if (!PUSH(&context,
+// document.nodes.start[sequence-1].data.sequence.items, item))
+// return 0
+//
+// return 1
+//}
+//
+///*
+// * Append a pair of a key and a value to a mapping node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_mapping_pair(document *yaml_document_t,
+// mapping int, key int, value int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// pair yaml_node_pair_t
+//
+// assert(document) // Non-NULL document is required.
+// assert(mapping > 0
+// && document.nodes.start + mapping <= document.nodes.top)
+// // Valid mapping id is required.
+// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
+// // A mapping node is required.
+// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
+// // Valid key id is required.
+// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
+// // Valid value id is required.
+//
+// pair.key = key
+// pair.value = value
+//
+// if (!PUSH(&context,
+// document.nodes.start[mapping-1].data.mapping.pairs, pair))
+// return 0
+//
+// return 1
+//}
+//
+//
diff --git a/vendor/go.yaml.in/yaml/v3/decode.go b/vendor/go.yaml.in/yaml/v3/decode.go
new file mode 100644
index 000000000..02e2b17bf
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/decode.go
@@ -0,0 +1,1018 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+// ----------------------------------------------------------------------------
+// Parser, produces a node tree out of a libyaml event stream.
+
+type parser struct {
+ parser yaml_parser_t
+ event yaml_event_t
+ doc *Node
+ anchors map[string]*Node
+ doneInit bool
+ textless bool
+}
+
+func newParser(b []byte) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+ panic("failed to initialize YAML parser")
+ }
+ if len(b) == 0 {
+ b = []byte{'\n'}
+ }
+ yaml_parser_set_input_string(&p.parser, b)
+ return &p
+}
+
+func newParserFromReader(r io.Reader) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+ panic("failed to initialize YAML parser")
+ }
+ yaml_parser_set_input_reader(&p.parser, r)
+ return &p
+}
+
+func (p *parser) init() {
+ if p.doneInit {
+ return
+ }
+ p.anchors = make(map[string]*Node)
+ p.expect(yaml_STREAM_START_EVENT)
+ p.doneInit = true
+}
+
+func (p *parser) destroy() {
+ if p.event.typ != yaml_NO_EVENT {
+ yaml_event_delete(&p.event)
+ }
+ yaml_parser_delete(&p.parser)
+}
+
+// expect consumes an event from the event stream and
+// checks that it's of the expected type.
+func (p *parser) expect(e yaml_event_type_t) {
+ if p.event.typ == yaml_NO_EVENT {
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+ }
+ if p.event.typ == yaml_STREAM_END_EVENT {
+ failf("attempted to go past the end of stream; corrupted value?")
+ }
+ if p.event.typ != e {
+ p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+ p.fail()
+ }
+ yaml_event_delete(&p.event)
+ p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+func (p *parser) peek() yaml_event_type_t {
+ if p.event.typ != yaml_NO_EVENT {
+ return p.event.typ
+ }
+	// It's a curious choice for the underlying API to generally return a
+	// positive result on success, yet in this case to return true in an
+	// error scenario. This was the source of bugs in the past (issue #666).
+ if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR {
+ p.fail()
+ }
+ return p.event.typ
+}
+
+func (p *parser) fail() {
+ var where string
+ var line int
+ if p.parser.context_mark.line != 0 {
+ line = p.parser.context_mark.line
+		// Scanner errors don't advance the line before returning the error
+ if p.parser.error == yaml_SCANNER_ERROR {
+ line++
+ }
+ } else if p.parser.problem_mark.line != 0 {
+ line = p.parser.problem_mark.line
+		// Scanner errors don't advance the line before returning the error
+ if p.parser.error == yaml_SCANNER_ERROR {
+ line++
+ }
+ }
+ if line != 0 {
+ where = "line " + strconv.Itoa(line) + ": "
+ }
+ var msg string
+ if len(p.parser.problem) > 0 {
+ msg = p.parser.problem
+ } else {
+ msg = "unknown problem parsing YAML content"
+ }
+ failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *Node, anchor []byte) {
+ if anchor != nil {
+ n.Anchor = string(anchor)
+ p.anchors[n.Anchor] = n
+ }
+}
+
+func (p *parser) parse() *Node {
+ p.init()
+ switch p.peek() {
+ case yaml_SCALAR_EVENT:
+ return p.scalar()
+ case yaml_ALIAS_EVENT:
+ return p.alias()
+ case yaml_MAPPING_START_EVENT:
+ return p.mapping()
+ case yaml_SEQUENCE_START_EVENT:
+ return p.sequence()
+ case yaml_DOCUMENT_START_EVENT:
+ return p.document()
+ case yaml_STREAM_END_EVENT:
+ // Happens when attempting to decode an empty buffer.
+ return nil
+ case yaml_TAIL_COMMENT_EVENT:
+ panic("internal error: unexpected tail comment event (please report)")
+ default:
+ panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String())
+ }
+}
+
+func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node {
+ var style Style
+ if tag != "" && tag != "!" {
+ tag = shortTag(tag)
+ style = TaggedStyle
+ } else if defaultTag != "" {
+ tag = defaultTag
+ } else if kind == ScalarNode {
+ tag, _ = resolve("", value)
+ }
+ n := &Node{
+ Kind: kind,
+ Tag: tag,
+ Value: value,
+ Style: style,
+ }
+ if !p.textless {
+ n.Line = p.event.start_mark.line + 1
+ n.Column = p.event.start_mark.column + 1
+ n.HeadComment = string(p.event.head_comment)
+ n.LineComment = string(p.event.line_comment)
+ n.FootComment = string(p.event.foot_comment)
+ }
+ return n
+}
+
+func (p *parser) parseChild(parent *Node) *Node {
+ child := p.parse()
+ parent.Content = append(parent.Content, child)
+ return child
+}
+
+func (p *parser) document() *Node {
+ n := p.node(DocumentNode, "", "", "")
+ p.doc = n
+ p.expect(yaml_DOCUMENT_START_EVENT)
+ p.parseChild(n)
+ if p.peek() == yaml_DOCUMENT_END_EVENT {
+ n.FootComment = string(p.event.foot_comment)
+ }
+ p.expect(yaml_DOCUMENT_END_EVENT)
+ return n
+}
+
+func (p *parser) alias() *Node {
+ n := p.node(AliasNode, "", "", string(p.event.anchor))
+ n.Alias = p.anchors[n.Value]
+ if n.Alias == nil {
+ failf("unknown anchor '%s' referenced", n.Value)
+ }
+ p.expect(yaml_ALIAS_EVENT)
+ return n
+}
+
+func (p *parser) scalar() *Node {
+ var parsedStyle = p.event.scalar_style()
+ var nodeStyle Style
+ switch {
+ case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0:
+ nodeStyle = DoubleQuotedStyle
+ case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0:
+ nodeStyle = SingleQuotedStyle
+ case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0:
+ nodeStyle = LiteralStyle
+ case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0:
+ nodeStyle = FoldedStyle
+ }
+ var nodeValue = string(p.event.value)
+ var nodeTag = string(p.event.tag)
+ var defaultTag string
+ if nodeStyle == 0 {
+ if nodeValue == "<<" {
+ defaultTag = mergeTag
+ }
+ } else {
+ defaultTag = strTag
+ }
+ n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue)
+ n.Style |= nodeStyle
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_SCALAR_EVENT)
+ return n
+}
+
+func (p *parser) sequence() *Node {
+ n := p.node(SequenceNode, seqTag, string(p.event.tag), "")
+ if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 {
+ n.Style |= FlowStyle
+ }
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_SEQUENCE_START_EVENT)
+ for p.peek() != yaml_SEQUENCE_END_EVENT {
+ p.parseChild(n)
+ }
+ n.LineComment = string(p.event.line_comment)
+ n.FootComment = string(p.event.foot_comment)
+ p.expect(yaml_SEQUENCE_END_EVENT)
+ return n
+}
+
+func (p *parser) mapping() *Node {
+ n := p.node(MappingNode, mapTag, string(p.event.tag), "")
+ block := true
+ if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 {
+ block = false
+ n.Style |= FlowStyle
+ }
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_MAPPING_START_EVENT)
+ for p.peek() != yaml_MAPPING_END_EVENT {
+ k := p.parseChild(n)
+ if block && k.FootComment != "" {
+ // Must be a foot comment for the prior value when being dedented.
+ if len(n.Content) > 2 {
+ n.Content[len(n.Content)-3].FootComment = k.FootComment
+ k.FootComment = ""
+ }
+ }
+ v := p.parseChild(n)
+ if k.FootComment == "" && v.FootComment != "" {
+ k.FootComment = v.FootComment
+ v.FootComment = ""
+ }
+ if p.peek() == yaml_TAIL_COMMENT_EVENT {
+ if k.FootComment == "" {
+ k.FootComment = string(p.event.foot_comment)
+ }
+ p.expect(yaml_TAIL_COMMENT_EVENT)
+ }
+ }
+ n.LineComment = string(p.event.line_comment)
+ n.FootComment = string(p.event.foot_comment)
+ if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 {
+ n.Content[len(n.Content)-2].FootComment = n.FootComment
+ n.FootComment = ""
+ }
+ p.expect(yaml_MAPPING_END_EVENT)
+ return n
+}
+
+// ----------------------------------------------------------------------------
+// Decoder, unmarshals a node into a provided value.
+
+type decoder struct {
+ doc *Node
+ aliases map[*Node]bool
+ terrors []string
+
+ stringMapType reflect.Type
+ generalMapType reflect.Type
+
+ knownFields bool
+ uniqueKeys bool
+ decodeCount int
+ aliasCount int
+ aliasDepth int
+
+ mergedFields map[interface{}]bool
+}
+
+var (
+ nodeType = reflect.TypeOf(Node{})
+ durationType = reflect.TypeOf(time.Duration(0))
+ stringMapType = reflect.TypeOf(map[string]interface{}{})
+ generalMapType = reflect.TypeOf(map[interface{}]interface{}{})
+ ifaceType = generalMapType.Elem()
+ timeType = reflect.TypeOf(time.Time{})
+ ptrTimeType = reflect.TypeOf(&time.Time{})
+)
+
+func newDecoder() *decoder {
+ d := &decoder{
+ stringMapType: stringMapType,
+ generalMapType: generalMapType,
+ uniqueKeys: true,
+ }
+ d.aliases = make(map[*Node]bool)
+ return d
+}
+
+func (d *decoder) terror(n *Node, tag string, out reflect.Value) {
+ if n.Tag != "" {
+ tag = n.Tag
+ }
+ value := n.Value
+ if tag != seqTag && tag != mapTag {
+ if len(value) > 10 {
+ value = " `" + value[:7] + "...`"
+ } else {
+ value = " `" + value + "`"
+ }
+ }
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type()))
+}
+
+func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) {
+ err := u.UnmarshalYAML(n)
+ if e, ok := err.(*TypeError); ok {
+ d.terrors = append(d.terrors, e.Errors...)
+ return false
+ }
+ if err != nil {
+ fail(err)
+ }
+ return true
+}
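+
+// Illustrative sketch (not part of the upstream sources): a user-side type
+// opts into the callUnmarshaler path above by implementing this package's
+// Unmarshaler interface. Port and its validation rule are hypothetical:
+//
+//	type Port int
+//
+//	func (p *Port) UnmarshalYAML(value *yaml.Node) error {
+//		var n int
+//		if err := value.Decode(&n); err != nil {
+//			return err
+//		}
+//		if n < 1 || n > 65535 {
+//			return fmt.Errorf("port out of range: %d", n)
+//		}
+//		*p = Port(n)
+//		return nil
+//	}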
+
+func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) {
+ terrlen := len(d.terrors)
+ err := u.UnmarshalYAML(func(v interface{}) (err error) {
+ defer handleErr(&err)
+ d.unmarshal(n, reflect.ValueOf(v))
+ if len(d.terrors) > terrlen {
+ issues := d.terrors[terrlen:]
+ d.terrors = d.terrors[:terrlen]
+ return &TypeError{issues}
+ }
+ return nil
+ })
+ if e, ok := err.(*TypeError); ok {
+ d.terrors = append(d.terrors, e.Errors...)
+ return false
+ }
+ if err != nil {
+ fail(err)
+ }
+ return true
+}
+
+// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
+// if a value is found to implement it.
+// It returns the initialized and dereferenced out value, whether
+// unmarshalling was already done by UnmarshalYAML, and if so whether
+// its types unmarshalled appropriately.
+//
+// If n holds a null value, prepare returns before doing anything.
+func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
+ if n.ShortTag() == nullTag {
+ return out, false, false
+ }
+ again := true
+ for again {
+ again = false
+ if out.Kind() == reflect.Ptr {
+ if out.IsNil() {
+ out.Set(reflect.New(out.Type().Elem()))
+ }
+ out = out.Elem()
+ again = true
+ }
+ if out.CanAddr() {
+ outi := out.Addr().Interface()
+ if u, ok := outi.(Unmarshaler); ok {
+ good = d.callUnmarshaler(n, u)
+ return out, true, good
+ }
+ if u, ok := outi.(obsoleteUnmarshaler); ok {
+ good = d.callObsoleteUnmarshaler(n, u)
+ return out, true, good
+ }
+ }
+ }
+ return out, false, false
+}
+
+func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) {
+ if n.ShortTag() == nullTag {
+ return reflect.Value{}
+ }
+ for _, num := range index {
+ for {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ continue
+ }
+ break
+ }
+ v = v.Field(num)
+ }
+ return v
+}
+
+const (
+ // 400,000 decode operations is ~500kb of dense object declarations, or
+ // ~5kb of dense object declarations with 10000% alias expansion
+ alias_ratio_range_low = 400000
+
+ // 4,000,000 decode operations is ~5MB of dense object declarations, or
+ // ~4.5MB of dense object declarations with 10% alias expansion
+ alias_ratio_range_high = 4000000
+
+ // alias_ratio_range is the range over which we scale allowed alias ratios
+ alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low)
+)
+
+func allowedAliasRatio(decodeCount int) float64 {
+ switch {
+ case decodeCount <= alias_ratio_range_low:
+ // allow 99% to come from alias expansion for small-to-medium documents
+ return 0.99
+ case decodeCount >= alias_ratio_range_high:
+ // allow 10% to come from alias expansion for very large documents
+ return 0.10
+ default:
+ // scale smoothly from 99% down to 10% over the range.
+ // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range.
+ // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps).
+ return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range)
+ }
+}
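+
+// Illustrative arithmetic (not part of the upstream sources): a document that
+// needs 2,200,000 decode operations sits halfway through the range above, so
+// the allowed alias ratio is
+//
+//	0.99 - 0.89*((2200000-400000)/3600000) = 0.99 - 0.445 = 0.545
+//
+// i.e. roughly 55% of decode operations may still come from alias expansion
+// before "document contains excessive aliasing" is reported.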
+
+func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) {
+ d.decodeCount++
+ if d.aliasDepth > 0 {
+ d.aliasCount++
+ }
+ if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) {
+ failf("document contains excessive aliasing")
+ }
+ if out.Type() == nodeType {
+ out.Set(reflect.ValueOf(n).Elem())
+ return true
+ }
+ switch n.Kind {
+ case DocumentNode:
+ return d.document(n, out)
+ case AliasNode:
+ return d.alias(n, out)
+ }
+ out, unmarshaled, good := d.prepare(n, out)
+ if unmarshaled {
+ return good
+ }
+ switch n.Kind {
+ case ScalarNode:
+ good = d.scalar(n, out)
+ case MappingNode:
+ good = d.mapping(n, out)
+ case SequenceNode:
+ good = d.sequence(n, out)
+ case 0:
+ if n.IsZero() {
+ return d.null(out)
+ }
+ fallthrough
+ default:
+ failf("cannot decode node with unknown kind %d", n.Kind)
+ }
+ return good
+}
+
+func (d *decoder) document(n *Node, out reflect.Value) (good bool) {
+ if len(n.Content) == 1 {
+ d.doc = n
+ d.unmarshal(n.Content[0], out)
+ return true
+ }
+ return false
+}
+
+func (d *decoder) alias(n *Node, out reflect.Value) (good bool) {
+ if d.aliases[n] {
+ // TODO this could actually be allowed in some circumstances.
+ failf("anchor '%s' value contains itself", n.Value)
+ }
+ d.aliases[n] = true
+ d.aliasDepth++
+ good = d.unmarshal(n.Alias, out)
+ d.aliasDepth--
+ delete(d.aliases, n)
+ return good
+}
+
+var zeroValue reflect.Value
+
+func resetMap(out reflect.Value) {
+ for _, k := range out.MapKeys() {
+ out.SetMapIndex(k, zeroValue)
+ }
+}
+
+func (d *decoder) null(out reflect.Value) bool {
+ if out.CanAddr() {
+ switch out.Kind() {
+ case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+ out.Set(reflect.Zero(out.Type()))
+ return true
+ }
+ }
+ return false
+}
+
+func (d *decoder) scalar(n *Node, out reflect.Value) bool {
+ var tag string
+ var resolved interface{}
+ if n.indicatedString() {
+ tag = strTag
+ resolved = n.Value
+ } else {
+ tag, resolved = resolve(n.Tag, n.Value)
+ if tag == binaryTag {
+ data, err := base64.StdEncoding.DecodeString(resolved.(string))
+ if err != nil {
+ failf("!!binary value contains invalid base64 data")
+ }
+ resolved = string(data)
+ }
+ }
+ if resolved == nil {
+ return d.null(out)
+ }
+ if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+ // We've resolved to exactly the type we want, so use that.
+ out.Set(resolvedv)
+ return true
+ }
+ // Perhaps we can use the value as a TextUnmarshaler to
+ // set its value.
+ if out.CanAddr() {
+ u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
+ if ok {
+ var text []byte
+ if tag == binaryTag {
+ text = []byte(resolved.(string))
+ } else {
+				// We let any value be unmarshaled into TextUnmarshaler.
+				// That might be more lax than we'd like, but the
+				// TextUnmarshaler itself should rule out any dubious values.
+ text = []byte(n.Value)
+ }
+ err := u.UnmarshalText(text)
+ if err != nil {
+ fail(err)
+ }
+ return true
+ }
+ }
+ switch out.Kind() {
+ case reflect.String:
+ if tag == binaryTag {
+ out.SetString(resolved.(string))
+ return true
+ }
+ out.SetString(n.Value)
+ return true
+ case reflect.Interface:
+ out.Set(reflect.ValueOf(resolved))
+ return true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ // This used to work in v2, but it's very unfriendly.
+ isDuration := out.Type() == durationType
+
+ switch resolved := resolved.(type) {
+ case int:
+ if !isDuration && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case int64:
+ if !isDuration && !out.OverflowInt(resolved) {
+ out.SetInt(resolved)
+ return true
+ }
+ case uint64:
+ if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case float64:
+ if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case string:
+ if out.Type() == durationType {
+ d, err := time.ParseDuration(resolved)
+ if err == nil {
+ out.SetInt(int64(d))
+ return true
+ }
+ }
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ switch resolved := resolved.(type) {
+ case int:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case int64:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case uint64:
+ if !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case float64:
+ if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ }
+ case reflect.Bool:
+ switch resolved := resolved.(type) {
+ case bool:
+ out.SetBool(resolved)
+ return true
+ case string:
+ // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html).
+ // It only works if explicitly attempting to unmarshal into a typed bool value.
+ switch resolved {
+ case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON":
+ out.SetBool(true)
+ return true
+ case "n", "N", "no", "No", "NO", "off", "Off", "OFF":
+ out.SetBool(false)
+ return true
+ }
+ }
+ case reflect.Float32, reflect.Float64:
+ switch resolved := resolved.(type) {
+ case int:
+ out.SetFloat(float64(resolved))
+ return true
+ case int64:
+ out.SetFloat(float64(resolved))
+ return true
+ case uint64:
+ out.SetFloat(float64(resolved))
+ return true
+ case float64:
+ out.SetFloat(resolved)
+ return true
+ }
+ case reflect.Struct:
+ if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+ out.Set(resolvedv)
+ return true
+ }
+ case reflect.Ptr:
+ panic("yaml internal error: please report the issue")
+ }
+ d.terror(n, tag, out)
+ return false
+}
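+
+// Illustrative note (not part of the upstream sources): the 1.1-style bool
+// compatibility handled above only applies when decoding into a typed bool,
+// e.g. via the package's Unmarshal entry point:
+//
+//	var b bool
+//	yaml.Unmarshal([]byte("yes"), &b) // b == true
+//
+//	var v interface{}
+//	yaml.Unmarshal([]byte("yes"), &v) // v == "yes" (a string under the 1.2 core schema)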
+
+func settableValueOf(i interface{}) reflect.Value {
+ v := reflect.ValueOf(i)
+ sv := reflect.New(v.Type()).Elem()
+ sv.Set(v)
+ return sv
+}
+
+func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) {
+ l := len(n.Content)
+
+ var iface reflect.Value
+ switch out.Kind() {
+ case reflect.Slice:
+ out.Set(reflect.MakeSlice(out.Type(), l, l))
+ case reflect.Array:
+ if l != out.Len() {
+ failf("invalid array: want %d elements but got %d", out.Len(), l)
+ }
+ case reflect.Interface:
+ // No type hints. Will have to use a generic sequence.
+ iface = out
+ out = settableValueOf(make([]interface{}, l))
+ default:
+ d.terror(n, seqTag, out)
+ return false
+ }
+ et := out.Type().Elem()
+
+ j := 0
+ for i := 0; i < l; i++ {
+ e := reflect.New(et).Elem()
+ if ok := d.unmarshal(n.Content[i], e); ok {
+ out.Index(j).Set(e)
+ j++
+ }
+ }
+ if out.Kind() != reflect.Array {
+ out.Set(out.Slice(0, j))
+ }
+ if iface.IsValid() {
+ iface.Set(out)
+ }
+ return true
+}
+
+func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) {
+ l := len(n.Content)
+ if d.uniqueKeys {
+ nerrs := len(d.terrors)
+ for i := 0; i < l; i += 2 {
+ ni := n.Content[i]
+ for j := i + 2; j < l; j += 2 {
+ nj := n.Content[j]
+ if ni.Kind == nj.Kind && ni.Value == nj.Value {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line))
+ }
+ }
+ }
+ if len(d.terrors) > nerrs {
+ return false
+ }
+ }
+ switch out.Kind() {
+ case reflect.Struct:
+ return d.mappingStruct(n, out)
+ case reflect.Map:
+ // okay
+ case reflect.Interface:
+ iface := out
+ if isStringMap(n) {
+ out = reflect.MakeMap(d.stringMapType)
+ } else {
+ out = reflect.MakeMap(d.generalMapType)
+ }
+ iface.Set(out)
+ default:
+ d.terror(n, mapTag, out)
+ return false
+ }
+
+ outt := out.Type()
+ kt := outt.Key()
+ et := outt.Elem()
+
+ stringMapType := d.stringMapType
+ generalMapType := d.generalMapType
+ if outt.Elem() == ifaceType {
+ if outt.Key().Kind() == reflect.String {
+ d.stringMapType = outt
+ } else if outt.Key() == ifaceType {
+ d.generalMapType = outt
+ }
+ }
+
+ mergedFields := d.mergedFields
+ d.mergedFields = nil
+
+ var mergeNode *Node
+
+ mapIsNew := false
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(outt))
+ mapIsNew = true
+ }
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.Content[i]) {
+ mergeNode = n.Content[i+1]
+ continue
+ }
+ k := reflect.New(kt).Elem()
+ if d.unmarshal(n.Content[i], k) {
+ if mergedFields != nil {
+ ki := k.Interface()
+ if d.getPossiblyUnhashableKey(mergedFields, ki) {
+ continue
+ }
+ d.setPossiblyUnhashableKey(mergedFields, ki, true)
+ }
+ kkind := k.Kind()
+ if kkind == reflect.Interface {
+ kkind = k.Elem().Kind()
+ }
+ if kkind == reflect.Map || kkind == reflect.Slice {
+ failf("invalid map key: %#v", k.Interface())
+ }
+ e := reflect.New(et).Elem()
+ if d.unmarshal(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) {
+ out.SetMapIndex(k, e)
+ }
+ }
+ }
+
+ d.mergedFields = mergedFields
+ if mergeNode != nil {
+ d.merge(n, mergeNode, out)
+ }
+
+ d.stringMapType = stringMapType
+ d.generalMapType = generalMapType
+ return true
+}
+
+func isStringMap(n *Node) bool {
+ if n.Kind != MappingNode {
+ return false
+ }
+ l := len(n.Content)
+ for i := 0; i < l; i += 2 {
+ shortTag := n.Content[i].ShortTag()
+ if shortTag != strTag && shortTag != mergeTag {
+ return false
+ }
+ }
+ return true
+}
+
+func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) {
+ sinfo, err := getStructInfo(out.Type())
+ if err != nil {
+ panic(err)
+ }
+
+ var inlineMap reflect.Value
+ var elemType reflect.Type
+ if sinfo.InlineMap != -1 {
+ inlineMap = out.Field(sinfo.InlineMap)
+ elemType = inlineMap.Type().Elem()
+ }
+
+ for _, index := range sinfo.InlineUnmarshalers {
+ field := d.fieldByIndex(n, out, index)
+ d.prepare(n, field)
+ }
+
+ mergedFields := d.mergedFields
+ d.mergedFields = nil
+ var mergeNode *Node
+ var doneFields []bool
+ if d.uniqueKeys {
+ doneFields = make([]bool, len(sinfo.FieldsList))
+ }
+ name := settableValueOf("")
+ l := len(n.Content)
+ for i := 0; i < l; i += 2 {
+ ni := n.Content[i]
+ if isMerge(ni) {
+ mergeNode = n.Content[i+1]
+ continue
+ }
+ if !d.unmarshal(ni, name) {
+ continue
+ }
+ sname := name.String()
+ if mergedFields != nil {
+ if mergedFields[sname] {
+ continue
+ }
+ mergedFields[sname] = true
+ }
+ if info, ok := sinfo.FieldsMap[sname]; ok {
+ if d.uniqueKeys {
+ if doneFields[info.Id] {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type()))
+ continue
+ }
+ doneFields[info.Id] = true
+ }
+ var field reflect.Value
+ if info.Inline == nil {
+ field = out.Field(info.Num)
+ } else {
+ field = d.fieldByIndex(n, out, info.Inline)
+ }
+ d.unmarshal(n.Content[i+1], field)
+ } else if sinfo.InlineMap != -1 {
+ if inlineMap.IsNil() {
+ inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+ }
+ value := reflect.New(elemType).Elem()
+ d.unmarshal(n.Content[i+1], value)
+ inlineMap.SetMapIndex(name, value)
+ } else if d.knownFields {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type()))
+ }
+ }
+
+ d.mergedFields = mergedFields
+ if mergeNode != nil {
+ d.merge(n, mergeNode, out)
+ }
+ return true
+}
+
+func failWantMap() {
+ failf("map merge requires map or sequence of maps as the value")
+}
+
+func (d *decoder) setPossiblyUnhashableKey(m map[interface{}]bool, key interface{}, value bool) {
+ defer func() {
+ if err := recover(); err != nil {
+ failf("%v", err)
+ }
+ }()
+ m[key] = value
+}
+
+func (d *decoder) getPossiblyUnhashableKey(m map[interface{}]bool, key interface{}) bool {
+ defer func() {
+ if err := recover(); err != nil {
+ failf("%v", err)
+ }
+ }()
+ return m[key]
+}
+
+func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) {
+ mergedFields := d.mergedFields
+ if mergedFields == nil {
+ d.mergedFields = make(map[interface{}]bool)
+ for i := 0; i < len(parent.Content); i += 2 {
+ k := reflect.New(ifaceType).Elem()
+ if d.unmarshal(parent.Content[i], k) {
+ d.setPossiblyUnhashableKey(d.mergedFields, k.Interface(), true)
+ }
+ }
+ }
+
+ switch merge.Kind {
+ case MappingNode:
+ d.unmarshal(merge, out)
+ case AliasNode:
+ if merge.Alias != nil && merge.Alias.Kind != MappingNode {
+ failWantMap()
+ }
+ d.unmarshal(merge, out)
+ case SequenceNode:
+ for i := 0; i < len(merge.Content); i++ {
+ ni := merge.Content[i]
+ if ni.Kind == AliasNode {
+ if ni.Alias != nil && ni.Alias.Kind != MappingNode {
+ failWantMap()
+ }
+ } else if ni.Kind != MappingNode {
+ failWantMap()
+ }
+ d.unmarshal(ni, out)
+ }
+ default:
+ failWantMap()
+ }
+
+ d.mergedFields = mergedFields
+}
+
+func isMerge(n *Node) bool {
+ return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" || shortTag(n.Tag) == mergeTag)
+}
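+
+// Illustrative note (not part of the upstream sources): the merge handling
+// above is what resolves YAML merge keys such as
+//
+//	base: &base {a: 1, b: 2}
+//	child:
+//	  <<: *base
+//	  b: 3
+//
+// Keys set explicitly on child ("b: 3") win over merged ones, because
+// mergedFields records the explicit keys before d.merge is applied.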
diff --git a/vendor/go.yaml.in/yaml/v3/emitterc.go b/vendor/go.yaml.in/yaml/v3/emitterc.go
new file mode 100644
index 000000000..ab4e03ba7
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/emitterc.go
@@ -0,0 +1,2054 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Flush the buffer if needed.
+func flush(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) {
+ return yaml_emitter_flush(emitter)
+ }
+ return true
+}
+
+// Put a character to the output buffer.
+func put(emitter *yaml_emitter_t, value byte) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.buffer[emitter.buffer_pos] = value
+ emitter.buffer_pos++
+ emitter.column++
+ return true
+}
+
+// Put a line break to the output buffer.
+func put_break(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ switch emitter.line_break {
+ case yaml_CR_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\r'
+ emitter.buffer_pos += 1
+ case yaml_LN_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\n'
+ emitter.buffer_pos += 1
+ case yaml_CRLN_BREAK:
+ emitter.buffer[emitter.buffer_pos+0] = '\r'
+ emitter.buffer[emitter.buffer_pos+1] = '\n'
+ emitter.buffer_pos += 2
+ default:
+ panic("unknown line break setting")
+ }
+ if emitter.column == 0 {
+ emitter.space_above = true
+ }
+ emitter.column = 0
+ emitter.line++
+ // [Go] Do this here and below and drop from everywhere else (see commented lines).
+ emitter.indention = true
+ return true
+}
+
+// Copy a character from a string into buffer.
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ p := emitter.buffer_pos
+ w := width(s[*i])
+ switch w {
+ case 4:
+ emitter.buffer[p+3] = s[*i+3]
+ fallthrough
+ case 3:
+ emitter.buffer[p+2] = s[*i+2]
+ fallthrough
+ case 2:
+ emitter.buffer[p+1] = s[*i+1]
+ fallthrough
+ case 1:
+ emitter.buffer[p+0] = s[*i+0]
+ default:
+ panic("unknown character width")
+ }
+ emitter.column++
+ emitter.buffer_pos += w
+ *i += w
+ return true
+}
+
+// Write a whole string into buffer.
+func write_all(emitter *yaml_emitter_t, s []byte) bool {
+ for i := 0; i < len(s); {
+ if !write(emitter, s, &i) {
+ return false
+ }
+ }
+ return true
+}
+
+// Copy a line break character from a string into buffer.
+func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if s[*i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ *i++
+ } else {
+ if !write(emitter, s, i) {
+ return false
+ }
+ if emitter.column == 0 {
+ emitter.space_above = true
+ }
+ emitter.column = 0
+ emitter.line++
+ // [Go] Do this here and above and drop from everywhere else (see commented lines).
+ emitter.indention = true
+ }
+ return true
+}
+
+// Set an emitter error and return false.
+func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_EMITTER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Emit an event.
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.events = append(emitter.events, *event)
+ for !yaml_emitter_need_more_events(emitter) {
+ event := &emitter.events[emitter.events_head]
+ if !yaml_emitter_analyze_event(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_state_machine(emitter, event) {
+ return false
+ }
+ yaml_event_delete(event)
+ emitter.events_head++
+ }
+ return true
+}
+
+// Check if we need to accumulate more events before emitting.
+//
+// We accumulate extra
+// - 1 event for DOCUMENT-START
+// - 2 events for SEQUENCE-START
+// - 3 events for MAPPING-START
+func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
+ if emitter.events_head == len(emitter.events) {
+ return true
+ }
+ var accumulate int
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_DOCUMENT_START_EVENT:
+ accumulate = 1
+ break
+ case yaml_SEQUENCE_START_EVENT:
+ accumulate = 2
+ break
+ case yaml_MAPPING_START_EVENT:
+ accumulate = 3
+ break
+ default:
+ return false
+ }
+ if len(emitter.events)-emitter.events_head > accumulate {
+ return false
+ }
+ var level int
+ for i := emitter.events_head; i < len(emitter.events); i++ {
+ switch emitter.events[i].typ {
+ case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
+ level++
+ case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
+ level--
+ }
+ if level == 0 {
+ return false
+ }
+ }
+ return true
+}
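+
+// Illustrative note (not part of the upstream sources): for the flow mapping
+// {a: 1} the event stream is MAPPING-START, SCALAR("a"), SCALAR("1"),
+// MAPPING-END. With MAPPING-START at the head the emitter buffers up to 3
+// extra events; once the whole mapping is queued, either the count exceeds
+// that threshold or the level counter reaches 0 at MAPPING-END, so the
+// function above reports that no more events are needed and emission starts.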
+
+// Append a directive to the directives stack.
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
+ }
+ }
+
+ // [Go] Do we actually need to copy this given garbage collection
+ // and the lack of deallocating destructors?
+ tag_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(tag_copy.handle, value.handle)
+ copy(tag_copy.prefix, value.prefix)
+ emitter.tag_directives = append(emitter.tag_directives, tag_copy)
+ return true
+}
+
+// Increase the indentation level.
+func yaml_emitter_increase_indent_compact(emitter *yaml_emitter_t, flow, indentless bool, compact_seq bool) bool {
+ emitter.indents = append(emitter.indents, emitter.indent)
+ if emitter.indent < 0 {
+ if flow {
+ emitter.indent = emitter.best_indent
+ } else {
+ emitter.indent = 0
+ }
+ } else if !indentless {
+ // [Go] This was changed so that indentations are more regular.
+ if emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE {
+ // The first indent inside a sequence will just skip the "- " indicator.
+ emitter.indent += 2
+ } else {
+ // Everything else aligns to the chosen indentation.
+ emitter.indent = emitter.best_indent * ((emitter.indent + emitter.best_indent) / emitter.best_indent)
+ if compact_seq {
+				// compact_seq is almost always false here; it is set only when the
+				// indent is being increased for a block sequence node. In that case
+				// subtract 2 so the indentation accounts for the leading "- " of
+				// each sequence item.
+ emitter.indent = emitter.indent - 2
+ }
+ }
+ }
+ return true
+}
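+
+// Illustrative arithmetic (not part of the upstream sources): with
+// best_indent = 2 and a current indent of 0, a nested block normally moves to
+// 2*((0+2)/2) = 2. When compact_seq is true (a block sequence directly under
+// a mapping key, with compact sequence indentation enabled), the 2 is
+// subtracted again, so the "- " indicator itself supplies the visual indent:
+//
+//	key:
+//	- item1
+//	- item2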
+
+// State dispatcher.
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ switch emitter.state {
+ default:
+ case yaml_EMIT_STREAM_START_STATE:
+ return yaml_emitter_emit_stream_start(emitter, event)
+
+ case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, true)
+
+ case yaml_EMIT_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, false)
+
+ case yaml_EMIT_DOCUMENT_CONTENT_STATE:
+ return yaml_emitter_emit_document_content(emitter, event)
+
+ case yaml_EMIT_DOCUMENT_END_STATE:
+ return yaml_emitter_emit_document_end(emitter, event)
+
+ case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false)
+
+ case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true)
+
+ case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false)
+
+ case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false)
+
+ case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true)
+
+ case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false)
+
+ case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_END_STATE:
+ return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
+ }
+ panic("invalid emitter state")
+}
+
+// Expect STREAM-START.
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_STREAM_START_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
+ }
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = event.encoding
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = yaml_UTF8_ENCODING
+ }
+ }
+ if emitter.best_indent < 2 || emitter.best_indent > 9 {
+ emitter.best_indent = 2
+ }
+ if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
+ emitter.best_width = 80
+ }
+ if emitter.best_width < 0 {
+ emitter.best_width = 1<<31 - 1
+ }
+ if emitter.line_break == yaml_ANY_BREAK {
+ emitter.line_break = yaml_LN_BREAK
+ }
+
+ emitter.indent = -1
+ emitter.line = 0
+ emitter.column = 0
+ emitter.whitespace = true
+ emitter.indention = true
+ emitter.space_above = true
+ emitter.foot_indent = -1
+
+ if emitter.encoding != yaml_UTF8_ENCODING {
+ if !yaml_emitter_write_bom(emitter) {
+ return false
+ }
+ }
+ emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
+ return true
+}
+
+// Expect DOCUMENT-START or STREAM-END.
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+
+ if event.typ == yaml_DOCUMENT_START_EVENT {
+
+ if event.version_directive != nil {
+ if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
+ return false
+ }
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(default_tag_directives); i++ {
+ tag_directive := &default_tag_directives[i]
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
+ return false
+ }
+ }
+
+ implicit := event.implicit
+ if !first || emitter.canonical {
+ implicit = false
+ }
+
+ if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if event.version_directive != nil {
+ implicit = false
+ if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if len(event.tag_directives) > 0 {
+ implicit = false
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ if yaml_emitter_check_empty_document(emitter) {
+ implicit = false
+ }
+ if !implicit {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
+ return false
+ }
+ if emitter.canonical || true {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ if len(emitter.head_comment) > 0 {
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if !put_break(emitter) {
+ return false
+ }
+ }
+
+ emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
+ return true
+ }
+
+ if event.typ == yaml_STREAM_END_EVENT {
+ if emitter.open_ended {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_END_STATE
+ return true
+ }
+
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
+}
+
+// yaml_emitter_increase_indent preserves the original signature and delegates to
+// yaml_emitter_increase_indent_compact without compact-sequence indentation
+func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
+ return yaml_emitter_increase_indent_compact(emitter, flow, indentless, false)
+}
+
+// yaml_emitter_process_line_comment preserves the original signature and delegates to
+// yaml_emitter_process_line_comment_linebreak passing false for linebreak
+func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool {
+ return yaml_emitter_process_line_comment_linebreak(emitter, false)
+}
+
+// Expect the root node.
+func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
+
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_emit_node(emitter, event, true, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect DOCUMENT-END.
+func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_DOCUMENT_END_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
+ }
+ // [Go] Force document foot separation.
+ emitter.foot_indent = 0
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ emitter.foot_indent = -1
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !event.implicit {
+ // [Go] Allocate the slice elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_DOCUMENT_START_STATE
+ emitter.tag_directives = emitter.tag_directives[:0]
+ return true
+}
+
+// Expect a flow item node.
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ if emitter.canonical && !first && !trail {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.column == 0 || emitter.canonical && !first {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+
+ return true
+ }
+
+ if !first && !trail {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if emitter.column == 0 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE)
+ } else {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
+ }
+ if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
+ return false
+ }
+ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect a flow key node.
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_MAPPING_END_EVENT {
+ if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+
+ if !first && !trail {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+
+ if emitter.column == 0 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a flow value node.
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
+ return false
+ }
+ }
+ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE)
+ } else {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
+ }
+ if !yaml_emitter_emit_node(emitter, event, false, false, true, false) {
+ return false
+ }
+ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect a block item node.
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+		// emitter.mapping_context tells us if we are currently in a mapping context.
+		// emitter.column tells us which column of the yaml output we are in; 0 is the first character of the line.
+		// emitter.indention tells us if the last character written was an indentation character.
+		// emitter.compact_sequence_indent tells us if '- ' is considered part of the indentation for sequence elements.
+		// So `seq` means: we are in a mapping context, we are either at the start of the line or the last
+		// character was not an indentation character, and '- ' is considered part of the indentation
+		// for sequence elements.
+ seq := emitter.mapping_context && (emitter.column == 0 || !emitter.indention) &&
+ emitter.compact_sequence_indent
+ if !yaml_emitter_increase_indent_compact(emitter, false, false, seq) {
+ return false
+ }
+ }
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+ if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, false) {
+ return false
+ }
+ }
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if len(emitter.line_comment) > 0 {
+ // [Go] A line comment was provided for the key. That's unusual as the
+ // scanner associates line comments with the value. Either way,
+ // save the line comment and render it appropriately later.
+ emitter.key_line_comment = emitter.line_comment
+ emitter.line_comment = nil
+ }
+ if yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
+ return false
+ }
+ }
+ if len(emitter.key_line_comment) > 0 {
+ // [Go] Line comments are generally associated with the value, but when there's
+ // no value on the same line as a mapping key they end up attached to the
+ // key itself.
+ if event.typ == yaml_SCALAR_EVENT {
+ if len(emitter.line_comment) == 0 {
+ // A scalar is coming and it has no line comments by itself yet,
+ // so just let it handle the line comment as usual. If it has a
+ // line comment, we can't have both so the one from the key is lost.
+ emitter.line_comment = emitter.key_line_comment
+ emitter.key_line_comment = nil
+ }
+ } else if event.sequence_style() != yaml_FLOW_SEQUENCE_STYLE && (event.typ == yaml_MAPPING_START_EVENT || event.typ == yaml_SEQUENCE_START_EVENT) {
+ // An indented block follows, so write the comment right now.
+ emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
+ if !yaml_emitter_emit_node(emitter, event, false, false, true, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+func yaml_emitter_silent_nil_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ return event.typ == yaml_SCALAR_EVENT && event.implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0
+}
+
+// Expect a node.
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
+ root bool, sequence bool, mapping bool, simple_key bool) bool {
+
+ emitter.root_context = root
+ emitter.sequence_context = sequence
+ emitter.mapping_context = mapping
+ emitter.simple_key_context = simple_key
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ return yaml_emitter_emit_alias(emitter, event)
+ case yaml_SCALAR_EVENT:
+ return yaml_emitter_emit_scalar(emitter, event)
+ case yaml_SEQUENCE_START_EVENT:
+ return yaml_emitter_emit_sequence_start(emitter, event)
+ case yaml_MAPPING_START_EVENT:
+ return yaml_emitter_emit_mapping_start(emitter, event)
+ default:
+ return yaml_emitter_set_emitter_error(emitter,
+ fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ))
+ }
+}
+
+// Expect ALIAS.
+func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SCALAR.
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_select_scalar_style(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ if !yaml_emitter_process_scalar(emitter) {
+ return false
+ }
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SEQUENCE-START.
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
+ yaml_emitter_check_empty_sequence(emitter) {
+ emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
+ }
+ return true
+}
+
+// Expect MAPPING-START.
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
+ yaml_emitter_check_empty_mapping(emitter) {
+ emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
+ }
+ return true
+}
+
+// Check if the document content is an empty scalar.
+func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
+ return false // [Go] Huh?
+}
+
+// Check if the next events represent an empty sequence.
+func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
+}
+
+// Check if the next events represent an empty mapping.
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+ length := 0
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_ALIAS_EVENT:
+ length += len(emitter.anchor_data.anchor)
+ case yaml_SCALAR_EVENT:
+ if emitter.scalar_data.multiline {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix) +
+ len(emitter.scalar_data.value)
+ case yaml_SEQUENCE_START_EVENT:
+ if !yaml_emitter_check_empty_sequence(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ case yaml_MAPPING_START_EVENT:
+ if !yaml_emitter_check_empty_mapping(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ default:
+ return false
+ }
+ return length <= 128
+}
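+
+// Illustrative note (not part of the upstream sources): when this check fails,
+// for instance for a scalar key longer than 128 characters or a multiline key,
+// the block mapping emitter falls back to the explicit key form:
+//
+//	? very long or multiline key
+//	: value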
+
+// Determine an acceptable scalar style.
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+ if no_tag && !event.implicit && !event.quoted_implicit {
+ return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+ }
+
+ style := event.scalar_style()
+ if style == yaml_ANY_SCALAR_STYLE {
+ style = yaml_PLAIN_SCALAR_STYLE
+ }
+ if emitter.canonical {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ if emitter.simple_key_context && emitter.scalar_data.multiline {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+
+ if style == yaml_PLAIN_SCALAR_STYLE {
+ if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+ emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if no_tag && !event.implicit {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+ if !emitter.scalar_data.single_quoted_allowed {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+ if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+
+ if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+ emitter.tag_data.handle = []byte{'!'}
+ }
+ emitter.scalar_data.style = style
+ return true
+}
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+ if emitter.anchor_data.anchor == nil {
+ return true
+ }
+ c := []byte{'&'}
+ if emitter.anchor_data.alias {
+ c[0] = '*'
+ }
+ if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+ return false
+ }
+ return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+ if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+ return true
+ }
+ if len(emitter.tag_data.handle) > 0 {
+ if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+ return false
+ }
+ if len(emitter.tag_data.suffix) > 0 {
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ }
+ } else {
+ // [Go] Allocate these slices elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+ return false
+ }
+ }
+ return true
+}
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+ switch emitter.scalar_data.style {
+ case yaml_PLAIN_SCALAR_STYLE:
+ return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_LITERAL_SCALAR_STYLE:
+ return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+ case yaml_FOLDED_SCALAR_STYLE:
+ return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+ }
+ panic("unknown scalar style")
+}
+
+// Write a head comment.
+func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool {
+ if len(emitter.tail_comment) > 0 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.tail_comment) {
+ return false
+ }
+ emitter.tail_comment = emitter.tail_comment[:0]
+ emitter.foot_indent = emitter.indent
+ if emitter.foot_indent < 0 {
+ emitter.foot_indent = 0
+ }
+ }
+
+ if len(emitter.head_comment) == 0 {
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.head_comment) {
+ return false
+ }
+ emitter.head_comment = emitter.head_comment[:0]
+ return true
+}
+
+// Write a line comment.
+func yaml_emitter_process_line_comment_linebreak(emitter *yaml_emitter_t, linebreak bool) bool {
+ if len(emitter.line_comment) == 0 {
+ // The next 3 lines are needed to resolve an issue with leading newlines
+ // See https://github.com/go-yaml/yaml/issues/755
+ // When linebreak is set to true, put_break will be called and will add
+ // the needed newline.
+ if linebreak && !put_break(emitter) {
+ return false
+ }
+ return true
+ }
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.line_comment) {
+ return false
+ }
+ emitter.line_comment = emitter.line_comment[:0]
+ return true
+}
+
+// Write a foot comment.
+func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool {
+ if len(emitter.foot_comment) == 0 {
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.foot_comment) {
+ return false
+ }
+ emitter.foot_comment = emitter.foot_comment[:0]
+ emitter.foot_indent = emitter.indent
+ if emitter.foot_indent < 0 {
+ emitter.foot_indent = 0
+ }
+ return true
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+ if version_directive.major != 1 || version_directive.minor != 1 {
+ return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+ }
+ return true
+}
+
+// Check if a %TAG directive is valid.
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
+ handle := tag_directive.handle
+ prefix := tag_directive.prefix
+ if len(handle) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
+ }
+ if handle[0] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
+ }
+ if handle[len(handle)-1] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
+ }
+ for i := 1; i < len(handle)-1; i += width(handle[i]) {
+ if !is_alpha(handle, i) {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
+ }
+ }
+ if len(prefix) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
+ }
+ return true
+}
+
+// Check if an anchor is valid.
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
+ if len(anchor) == 0 {
+ problem := "anchor value must not be empty"
+ if alias {
+ problem = "alias value must not be empty"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ for i := 0; i < len(anchor); i += width(anchor[i]) {
+ if !is_alpha(anchor, i) {
+ problem := "anchor value must contain alphanumerical characters only"
+ if alias {
+ problem = "alias value must contain alphanumerical characters only"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ }
+ emitter.anchor_data.anchor = anchor
+ emitter.anchor_data.alias = alias
+ return true
+}
+
+// Check if a tag is valid.
+func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
+ if len(tag) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
+ }
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ tag_directive := &emitter.tag_directives[i]
+ if bytes.HasPrefix(tag, tag_directive.prefix) {
+ emitter.tag_data.handle = tag_directive.handle
+ emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
+ return true
+ }
+ }
+ emitter.tag_data.suffix = tag
+ return true
+}
+
+// Check if a scalar is valid.
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ var (
+ block_indicators = false
+ flow_indicators = false
+ line_breaks = false
+ special_characters = false
+ tab_characters = false
+
+ leading_space = false
+ leading_break = false
+ trailing_space = false
+ trailing_break = false
+ break_space = false
+ space_break = false
+
+ preceded_by_whitespace = false
+ followed_by_whitespace = false
+ previous_space = false
+ previous_break = false
+ )
+
+ emitter.scalar_data.value = value
+
+ if len(value) == 0 {
+ emitter.scalar_data.multiline = false
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = false
+ return true
+ }
+
+ if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
+ block_indicators = true
+ flow_indicators = true
+ }
+
+ preceded_by_whitespace = true
+ for i, w := 0, 0; i < len(value); i += w {
+ w = width(value[i])
+ followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
+
+ if i == 0 {
+ switch value[i] {
+ case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
+ flow_indicators = true
+ block_indicators = true
+ case '?', ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '-':
+ if followed_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ } else {
+ switch value[i] {
+ case ',', '?', '[', ']', '{', '}':
+ flow_indicators = true
+ case ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '#':
+ if preceded_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ }
+
+ if value[i] == '\t' {
+ tab_characters = true
+ } else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
+ special_characters = true
+ }
+ if is_space(value, i) {
+ if i == 0 {
+ leading_space = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_space = true
+ }
+ if previous_break {
+ break_space = true
+ }
+ previous_space = true
+ previous_break = false
+ } else if is_break(value, i) {
+ line_breaks = true
+ if i == 0 {
+ leading_break = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_break = true
+ }
+ if previous_space {
+ space_break = true
+ }
+ previous_space = false
+ previous_break = true
+ } else {
+ previous_space = false
+ previous_break = false
+ }
+
+ // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
+ preceded_by_whitespace = is_blankz(value, i)
+ }
+
+ emitter.scalar_data.multiline = line_breaks
+ emitter.scalar_data.flow_plain_allowed = true
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = true
+
+ if leading_space || leading_break || trailing_space || trailing_break {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if trailing_space {
+ emitter.scalar_data.block_allowed = false
+ }
+ if break_space {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ }
+ if space_break || tab_characters || special_characters {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ }
+ if space_break || special_characters {
+ emitter.scalar_data.block_allowed = false
+ }
+ if line_breaks {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if flow_indicators {
+ emitter.scalar_data.flow_plain_allowed = false
+ }
+ if block_indicators {
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ return true
+}
+
+// Check if the event data is valid.
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ emitter.anchor_data.anchor = nil
+ emitter.tag_data.handle = nil
+ emitter.tag_data.suffix = nil
+ emitter.scalar_data.value = nil
+
+ if len(event.head_comment) > 0 {
+ emitter.head_comment = event.head_comment
+ }
+ if len(event.line_comment) > 0 {
+ emitter.line_comment = event.line_comment
+ }
+ if len(event.foot_comment) > 0 {
+ emitter.foot_comment = event.foot_comment
+ }
+ if len(event.tail_comment) > 0 {
+ emitter.tail_comment = event.tail_comment
+ }
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
+ return false
+ }
+
+ case yaml_SCALAR_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ if !yaml_emitter_analyze_scalar(emitter, event.value) {
+ return false
+ }
+
+ case yaml_SEQUENCE_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+
+ case yaml_MAPPING_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Write the BOM character.
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
+ if !flush(emitter) {
+ return false
+ }
+ pos := emitter.buffer_pos
+ emitter.buffer[pos+0] = '\xEF'
+ emitter.buffer[pos+1] = '\xBB'
+ emitter.buffer[pos+2] = '\xBF'
+ emitter.buffer_pos += 3
+ return true
+}
+
+func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
+ indent := emitter.indent
+ if indent < 0 {
+ indent = 0
+ }
+ if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if emitter.foot_indent == indent {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ for emitter.column < indent {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ emitter.whitespace = true
+ //emitter.indention = true
+ emitter.space_above = false
+ emitter.foot_indent = -1
+ return true
+}
+
+func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, indicator) {
+ return false
+ }
+ emitter.whitespace = is_whitespace
+ emitter.indention = (emitter.indention && is_indention)
+ emitter.open_ended = false
+ return true
+}
+
+func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ for i := 0; i < len(value); {
+ var must_write bool
+ switch value[i] {
+ case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
+ must_write = true
+ default:
+ must_write = is_alpha(value, i)
+ }
+ if must_write {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ } else {
+ w := width(value[i])
+ for k := 0; k < w; k++ {
+ octet := value[i]
+ i++
+ if !put(emitter, '%') {
+ return false
+ }
+
+ c := octet >> 4
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+
+ c = octet & 0x0f
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+ }
+ }
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ if len(value) > 0 && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+
+ if len(value) > 0 {
+ emitter.whitespace = false
+ }
+ emitter.indention = false
+ if emitter.root_context {
+ emitter.open_ended = true
+ }
+
+ return true
+}
+
+func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
+ return false
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if value[i] == '\'' {
+ if !put(emitter, '\'') {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ spaces := false
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
+ return false
+ }
+
+ for i := 0; i < len(value); {
+ if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
+ is_bom(value, i) || is_break(value, i) ||
+ value[i] == '"' || value[i] == '\\' {
+
+ octet := value[i]
+
+ var w int
+ var v rune
+ switch {
+ case octet&0x80 == 0x00:
+ w, v = 1, rune(octet&0x7F)
+ case octet&0xE0 == 0xC0:
+ w, v = 2, rune(octet&0x1F)
+ case octet&0xF0 == 0xE0:
+ w, v = 3, rune(octet&0x0F)
+ case octet&0xF8 == 0xF0:
+ w, v = 4, rune(octet&0x07)
+ }
+ for k := 1; k < w; k++ {
+ octet = value[i+k]
+ v = (v << 6) + (rune(octet) & 0x3F)
+ }
+ i += w
+
+ if !put(emitter, '\\') {
+ return false
+ }
+
+ var ok bool
+ switch v {
+ case 0x00:
+ ok = put(emitter, '0')
+ case 0x07:
+ ok = put(emitter, 'a')
+ case 0x08:
+ ok = put(emitter, 'b')
+ case 0x09:
+ ok = put(emitter, 't')
+ case 0x0A:
+ ok = put(emitter, 'n')
+ case 0x0b:
+ ok = put(emitter, 'v')
+ case 0x0c:
+ ok = put(emitter, 'f')
+ case 0x0d:
+ ok = put(emitter, 'r')
+ case 0x1b:
+ ok = put(emitter, 'e')
+ case 0x22:
+ ok = put(emitter, '"')
+ case 0x5c:
+ ok = put(emitter, '\\')
+ case 0x85:
+ ok = put(emitter, 'N')
+ case 0xA0:
+ ok = put(emitter, '_')
+ case 0x2028:
+ ok = put(emitter, 'L')
+ case 0x2029:
+ ok = put(emitter, 'P')
+ default:
+ if v <= 0xFF {
+ ok = put(emitter, 'x')
+ w = 2
+ } else if v <= 0xFFFF {
+ ok = put(emitter, 'u')
+ w = 4
+ } else {
+ ok = put(emitter, 'U')
+ w = 8
+ }
+ for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
+ digit := byte((v >> uint(k)) & 0x0F)
+ if digit < 10 {
+ ok = put(emitter, digit+'0')
+ } else {
+ ok = put(emitter, digit+'A'-10)
+ }
+ }
+ }
+ if !ok {
+ return false
+ }
+ spaces = false
+ } else if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if is_space(value, i+1) {
+ if !put(emitter, '\\') {
+ return false
+ }
+ }
+ i += width(value[i])
+ } else if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = true
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
+ if is_space(value, 0) || is_break(value, 0) {
+ indent_hint := []byte{'0' + byte(emitter.best_indent)}
+ if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
+ return false
+ }
+ }
+
+ emitter.open_ended = false
+
+ var chomp_hint [1]byte
+ if len(value) == 0 {
+ chomp_hint[0] = '-'
+ } else {
+ i := len(value) - 1
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if !is_break(value, i) {
+ chomp_hint[0] = '-'
+ } else if i == 0 {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ } else {
+ i--
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if is_break(value, i) {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ }
+ }
+ }
+ if chomp_hint[0] != 0 {
+ if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
+ return false
+ }
+ }
+ return true
+}
+
+func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment_linebreak(emitter, true) {
+ return false
+ }
+ //emitter.indention = true
+ emitter.whitespace = true
+ breaks := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+
+ return true
+}
+
+func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment_linebreak(emitter, true) {
+ return false
+ }
+
+ //emitter.indention = true
+ emitter.whitespace = true
+
+ breaks := true
+ leading_spaces := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !breaks && !leading_spaces && value[i] == '\n' {
+ k := 0
+ for is_break(value, k) {
+ k += width(value[k])
+ }
+ if !is_blankz(value, k) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ leading_spaces = is_blank(value, i)
+ }
+ if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+ return true
+}
+
+func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool {
+ breaks := false
+ pound := false
+ for i := 0; i < len(comment); {
+ if is_break(comment, i) {
+ if !write_break(emitter, comment, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ pound = false
+ } else {
+ if breaks && !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !pound {
+ if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) {
+ return false
+ }
+ pound = true
+ }
+ if !write(emitter, comment, &i) {
+ return false
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+ if !breaks && !put_break(emitter) {
+ return false
+ }
+
+ emitter.whitespace = true
+ //emitter.indention = true
+ return true
+}
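
A note on the emitter code above: yaml_emitter_analyze_scalar and yaml_emitter_select_scalar_style together decide whether a value may be emitted as a plain scalar or must fall back to a quoted or block style. The minimal sketch below shows that behaviour through the package's exported Marshal API; the go.yaml.in/yaml/v3 import path simply mirrors the vendored files, and gopkg.in/yaml.v3 exposes the same API. The output comments are illustrative rather than verified against this exact revision.

    package main

    import (
        "fmt"

        yaml "go.yaml.in/yaml/v3"
    )

    func main() {
        doc := map[string]string{
            "plain":     "hello",
            "multiline": "line one\nline two\n",
            "leading":   " starts with a space",
        }
        out, err := yaml.Marshal(doc)
        if err != nil {
            panic(err)
        }
        fmt.Print(string(out))
        // The analyzer disallows plain style for the value with a leading
        // space, and the encoder prefers a literal block for the multiline
        // value, so they are emitted quoted and as a block scalar
        // respectively (exact rendering may vary between versions).
    }
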
diff --git a/vendor/go.yaml.in/yaml/v3/encode.go b/vendor/go.yaml.in/yaml/v3/encode.go
new file mode 100644
index 000000000..de9e72a3e
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/encode.go
@@ -0,0 +1,577 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "encoding"
+ "fmt"
+ "io"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+type encoder struct {
+ emitter yaml_emitter_t
+ event yaml_event_t
+ out []byte
+ flow bool
+ indent int
+ doneInit bool
+}
+
+func newEncoder() *encoder {
+ e := &encoder{}
+ yaml_emitter_initialize(&e.emitter)
+ yaml_emitter_set_output_string(&e.emitter, &e.out)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ return e
+}
+
+func newEncoderWithWriter(w io.Writer) *encoder {
+ e := &encoder{}
+ yaml_emitter_initialize(&e.emitter)
+ yaml_emitter_set_output_writer(&e.emitter, w)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ return e
+}
+
+func (e *encoder) init() {
+ if e.doneInit {
+ return
+ }
+ if e.indent == 0 {
+ e.indent = 4
+ }
+ e.emitter.best_indent = e.indent
+ yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
+ e.emit()
+ e.doneInit = true
+}
+
+func (e *encoder) finish() {
+ e.emitter.open_ended = false
+ yaml_stream_end_event_initialize(&e.event)
+ e.emit()
+}
+
+func (e *encoder) destroy() {
+ yaml_emitter_delete(&e.emitter)
+}
+
+func (e *encoder) emit() {
+ // This will internally delete the e.event value.
+ e.must(yaml_emitter_emit(&e.emitter, &e.event))
+}
+
+func (e *encoder) must(ok bool) {
+ if !ok {
+ msg := e.emitter.problem
+ if msg == "" {
+ msg = "unknown problem generating YAML content"
+ }
+ failf("%s", msg)
+ }
+}
+
+func (e *encoder) marshalDoc(tag string, in reflect.Value) {
+ e.init()
+ var node *Node
+ if in.IsValid() {
+ node, _ = in.Interface().(*Node)
+ }
+ if node != nil && node.Kind == DocumentNode {
+ e.nodev(in)
+ } else {
+ yaml_document_start_event_initialize(&e.event, nil, nil, true)
+ e.emit()
+ e.marshal(tag, in)
+ yaml_document_end_event_initialize(&e.event, true)
+ e.emit()
+ }
+}
+
+func (e *encoder) marshal(tag string, in reflect.Value) {
+ tag = shortTag(tag)
+ if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
+ e.nilv()
+ return
+ }
+ iface := in.Interface()
+ switch value := iface.(type) {
+ case *Node:
+ e.nodev(in)
+ return
+ case Node:
+ if !in.CanAddr() {
+ var n = reflect.New(in.Type()).Elem()
+ n.Set(in)
+ in = n
+ }
+ e.nodev(in.Addr())
+ return
+ case time.Time:
+ e.timev(tag, in)
+ return
+ case *time.Time:
+ e.timev(tag, in.Elem())
+ return
+ case time.Duration:
+ e.stringv(tag, reflect.ValueOf(value.String()))
+ return
+ case Marshaler:
+ v, err := value.MarshalYAML()
+ if err != nil {
+ fail(err)
+ }
+ if v == nil {
+ e.nilv()
+ return
+ }
+ e.marshal(tag, reflect.ValueOf(v))
+ return
+ case encoding.TextMarshaler:
+ text, err := value.MarshalText()
+ if err != nil {
+ fail(err)
+ }
+ in = reflect.ValueOf(string(text))
+ case nil:
+ e.nilv()
+ return
+ }
+ switch in.Kind() {
+ case reflect.Interface:
+ e.marshal(tag, in.Elem())
+ case reflect.Map:
+ e.mapv(tag, in)
+ case reflect.Ptr:
+ e.marshal(tag, in.Elem())
+ case reflect.Struct:
+ e.structv(tag, in)
+ case reflect.Slice, reflect.Array:
+ e.slicev(tag, in)
+ case reflect.String:
+ e.stringv(tag, in)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ e.intv(tag, in)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ e.uintv(tag, in)
+ case reflect.Float32, reflect.Float64:
+ e.floatv(tag, in)
+ case reflect.Bool:
+ e.boolv(tag, in)
+ default:
+ panic("cannot marshal type: " + in.Type().String())
+ }
+}
+
+func (e *encoder) mapv(tag string, in reflect.Value) {
+ e.mappingv(tag, func() {
+ keys := keyList(in.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ e.marshal("", k)
+ e.marshal("", in.MapIndex(k))
+ }
+ })
+}
+
+func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) {
+ for _, num := range index {
+ for {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ return reflect.Value{}
+ }
+ v = v.Elem()
+ continue
+ }
+ break
+ }
+ v = v.Field(num)
+ }
+ return v
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+ sinfo, err := getStructInfo(in.Type())
+ if err != nil {
+ panic(err)
+ }
+ e.mappingv(tag, func() {
+ for _, info := range sinfo.FieldsList {
+ var value reflect.Value
+ if info.Inline == nil {
+ value = in.Field(info.Num)
+ } else {
+ value = e.fieldByIndex(in, info.Inline)
+ if !value.IsValid() {
+ continue
+ }
+ }
+ if info.OmitEmpty && isZero(value) {
+ continue
+ }
+ e.marshal("", reflect.ValueOf(info.Key))
+ e.flow = info.Flow
+ e.marshal("", value)
+ }
+ if sinfo.InlineMap >= 0 {
+ m := in.Field(sinfo.InlineMap)
+ if m.Len() > 0 {
+ e.flow = false
+ keys := keyList(m.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ if _, found := sinfo.FieldsMap[k.String()]; found {
+ panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String()))
+ }
+ e.marshal("", k)
+ e.flow = false
+ e.marshal("", m.MapIndex(k))
+ }
+ }
+ }
+ })
+}
+
+func (e *encoder) mappingv(tag string, f func()) {
+ implicit := tag == ""
+ style := yaml_BLOCK_MAPPING_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_MAPPING_STYLE
+ }
+ yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
+ e.emit()
+ f()
+ yaml_mapping_end_event_initialize(&e.event)
+ e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+ implicit := tag == ""
+ style := yaml_BLOCK_SEQUENCE_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_SEQUENCE_STYLE
+ }
+ e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+ e.emit()
+ n := in.Len()
+ for i := 0; i < n; i++ {
+ e.marshal("", in.Index(i))
+ }
+ e.must(yaml_sequence_end_event_initialize(&e.event))
+ e.emit()
+}
+
+// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but these should be marshalled quoted for
+// the time being for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+ // Fast path.
+ if s == "" {
+ return false
+ }
+ c := s[0]
+ if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+ return false
+ }
+ // Do the full match.
+ return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
+
+// isOldBool returns whether s is bool notation as defined in YAML 1.1.
+//
+// We continue to force strings that YAML 1.1 would interpret as booleans to be
+// rendered as quoted strings so that the marshalled output is valid for YAML 1.1
+// parsing.
+func isOldBool(s string) (result bool) {
+ switch s {
+ case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON",
+ "n", "N", "no", "No", "NO", "off", "Off", "OFF":
+ return true
+ default:
+ return false
+ }
+}
+
+func (e *encoder) stringv(tag string, in reflect.Value) {
+ var style yaml_scalar_style_t
+ s := in.String()
+ canUsePlain := true
+ switch {
+ case !utf8.ValidString(s):
+ if tag == binaryTag {
+ failf("explicitly tagged !!binary data must be base64-encoded")
+ }
+ if tag != "" {
+ failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+ }
+ // It can't be encoded directly as YAML so use a binary tag
+ // and encode it as base64.
+ tag = binaryTag
+ s = encodeBase64(s)
+ case tag == "":
+ // Check to see if it would resolve to a specific
+ // tag when encoded unquoted. If it doesn't,
+ // there's no need to quote it.
+ rtag, _ := resolve("", s)
+ canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s))
+ }
+ // Note: it's possible for user code to emit invalid YAML
+ // if they explicitly specify a tag and a string containing
+ // text that's incompatible with that tag.
+ switch {
+ case strings.Contains(s, "\n"):
+ if e.flow {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ } else {
+ style = yaml_LITERAL_SCALAR_STYLE
+ }
+ case canUsePlain:
+ style = yaml_PLAIN_SCALAR_STYLE
+ default:
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ e.emitScalar(s, "", tag, style, nil, nil, nil, nil)
+}
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+ var s string
+ if in.Bool() {
+ s = "true"
+ } else {
+ s = "false"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+ s := strconv.FormatInt(in.Int(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+ s := strconv.FormatUint(in.Uint(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) timev(tag string, in reflect.Value) {
+ t := in.Interface().(time.Time)
+ s := t.Format(time.RFC3339Nano)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+ // Issue #352: When formatting, use the precision of the underlying value
+ precision := 64
+ if in.Kind() == reflect.Float32 {
+ precision = 32
+ }
+
+ s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
+ switch s {
+ case "+Inf":
+ s = ".inf"
+ case "-Inf":
+ s = "-.inf"
+ case "NaN":
+ s = ".nan"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) nilv() {
+ e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) {
+ // TODO Kill this function. Replace all initialize calls by their underlying Go literals.
+ implicit := tag == ""
+ if !implicit {
+ tag = longTag(tag)
+ }
+ e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+ e.event.head_comment = head
+ e.event.line_comment = line
+ e.event.foot_comment = foot
+ e.event.tail_comment = tail
+ e.emit()
+}
+
+func (e *encoder) nodev(in reflect.Value) {
+ e.node(in.Interface().(*Node), "")
+}
+
+func (e *encoder) node(node *Node, tail string) {
+ // Zero nodes behave as nil.
+ if node.Kind == 0 && node.IsZero() {
+ e.nilv()
+ return
+ }
+
+ // If the tag was not explicitly requested, and dropping it won't change the
+ // implicit tag of the value, don't include it in the presentation.
+ var tag = node.Tag
+ var stag = shortTag(tag)
+ var forceQuoting bool
+ if tag != "" && node.Style&TaggedStyle == 0 {
+ if node.Kind == ScalarNode {
+ if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 {
+ tag = ""
+ } else {
+ rtag, _ := resolve("", node.Value)
+ if rtag == stag {
+ tag = ""
+ } else if stag == strTag {
+ tag = ""
+ forceQuoting = true
+ }
+ }
+ } else {
+ var rtag string
+ switch node.Kind {
+ case MappingNode:
+ rtag = mapTag
+ case SequenceNode:
+ rtag = seqTag
+ }
+ if rtag == stag {
+ tag = ""
+ }
+ }
+ }
+
+ switch node.Kind {
+ case DocumentNode:
+ yaml_document_start_event_initialize(&e.event, nil, nil, true)
+ e.event.head_comment = []byte(node.HeadComment)
+ e.emit()
+ for _, node := range node.Content {
+ e.node(node, "")
+ }
+ yaml_document_end_event_initialize(&e.event, true)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case SequenceNode:
+ style := yaml_BLOCK_SEQUENCE_STYLE
+ if node.Style&FlowStyle != 0 {
+ style = yaml_FLOW_SEQUENCE_STYLE
+ }
+ e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style))
+ e.event.head_comment = []byte(node.HeadComment)
+ e.emit()
+ for _, node := range node.Content {
+ e.node(node, "")
+ }
+ e.must(yaml_sequence_end_event_initialize(&e.event))
+ e.event.line_comment = []byte(node.LineComment)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case MappingNode:
+ style := yaml_BLOCK_MAPPING_STYLE
+ if node.Style&FlowStyle != 0 {
+ style = yaml_FLOW_MAPPING_STYLE
+ }
+ yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)
+ e.event.tail_comment = []byte(tail)
+ e.event.head_comment = []byte(node.HeadComment)
+ e.emit()
+
+ // The tail logic below moves the foot comment of prior keys to the following key,
+ // since the value for each key may be a nested structure and the foot needs to be
+ // processed only after the entirety of the value is streamed. The last tail is
+ // processed with the mapping end event.
+ var tail string
+ for i := 0; i+1 < len(node.Content); i += 2 {
+ k := node.Content[i]
+ foot := k.FootComment
+ if foot != "" {
+ kopy := *k
+ kopy.FootComment = ""
+ k = &kopy
+ }
+ e.node(k, tail)
+ tail = foot
+
+ v := node.Content[i+1]
+ e.node(v, "")
+ }
+
+ yaml_mapping_end_event_initialize(&e.event)
+ e.event.tail_comment = []byte(tail)
+ e.event.line_comment = []byte(node.LineComment)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case AliasNode:
+ yaml_alias_event_initialize(&e.event, []byte(node.Value))
+ e.event.head_comment = []byte(node.HeadComment)
+ e.event.line_comment = []byte(node.LineComment)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case ScalarNode:
+ value := node.Value
+ if !utf8.ValidString(value) {
+ if stag == binaryTag {
+ failf("explicitly tagged !!binary data must be base64-encoded")
+ }
+ if stag != "" {
+ failf("cannot marshal invalid UTF-8 data as %s", stag)
+ }
+ // It can't be encoded directly as YAML so use a binary tag
+ // and encode it as base64.
+ tag = binaryTag
+ value = encodeBase64(value)
+ }
+
+ style := yaml_PLAIN_SCALAR_STYLE
+ switch {
+ case node.Style&DoubleQuotedStyle != 0:
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ case node.Style&SingleQuotedStyle != 0:
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ case node.Style&LiteralStyle != 0:
+ style = yaml_LITERAL_SCALAR_STYLE
+ case node.Style&FoldedStyle != 0:
+ style = yaml_FOLDED_SCALAR_STYLE
+ case strings.Contains(value, "\n"):
+ style = yaml_LITERAL_SCALAR_STYLE
+ case forceQuoting:
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+
+ e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail))
+ default:
+ failf("cannot encode node with unknown kind %d", node.Kind)
+ }
+}
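
The quoting rules in stringv above are driven by isBase60Float and isOldBool: strings that a YAML 1.1 parser would read as sexagesimal numbers or booleans are forced into a quoted style even though YAML 1.2 would accept them plain. The following minimal sketch exercises that behaviour through the exported Marshal API; as before, the import path matches the vendored package, and the output comments are illustrative.

    package main

    import (
        "fmt"

        yaml "go.yaml.in/yaml/v3"
    )

    func main() {
        for _, v := range []string{"yes", "on", "1:30", "plain text"} {
            out, err := yaml.Marshal(map[string]string{"v": v})
            if err != nil {
                panic(err)
            }
            fmt.Print(string(out))
        }
        // "yes", "on" and "1:30" come out quoted so a YAML 1.1 parser cannot
        // reinterpret them as booleans or a base-60 number; "plain text"
        // stays unquoted because it resolves to a plain !!str either way.
    }
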
diff --git a/vendor/go.yaml.in/yaml/v3/parserc.go b/vendor/go.yaml.in/yaml/v3/parserc.go
new file mode 100644
index 000000000..25fe82363
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/parserc.go
@@ -0,0 +1,1274 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "bytes"
+)
+
+// The parser implements the following grammar:
+//
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// implicit_document ::= block_node DOCUMENT-END*
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// | properties (block_content | indentless_block_sequence)?
+// | block_content
+// | indentless_block_sequence
+// block_node ::= ALIAS
+// | properties block_content?
+// | block_content
+// flow_node ::= ALIAS
+// | properties flow_content?
+// | flow_content
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// block_content ::= block_collection | flow_collection | SCALAR
+// flow_content ::= flow_collection | SCALAR
+// block_collection ::= block_sequence | block_mapping
+// flow_collection ::= flow_sequence | flow_mapping
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// block_mapping ::= BLOCK-MAPPING_START
+// ((KEY block_node_or_indentless_sequence?)?
+// (VALUE block_node_or_indentless_sequence?)?)*
+// BLOCK-END
+// flow_sequence ::= FLOW-SEQUENCE-START
+// (flow_sequence_entry FLOW-ENTRY)*
+// flow_sequence_entry?
+// FLOW-SEQUENCE-END
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// flow_mapping ::= FLOW-MAPPING-START
+// (flow_mapping_entry FLOW-ENTRY)*
+// flow_mapping_entry?
+// FLOW-MAPPING-END
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+// Peek the next token in the token queue.
+func peek_token(parser *yaml_parser_t) *yaml_token_t {
+ if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
+ token := &parser.tokens[parser.tokens_head]
+ yaml_parser_unfold_comments(parser, token)
+ return token
+ }
+ return nil
+}
+
+// yaml_parser_unfold_comments walks through the comments queue and joins all
+// comments behind the position of the provided token into the respective
+// top-level comment slices in the parser.
+func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) {
+ for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index {
+ comment := &parser.comments[parser.comments_head]
+ if len(comment.head) > 0 {
+ if token.typ == yaml_BLOCK_END_TOKEN {
+ // No heads on ends, so keep comment.head for a follow up token.
+ break
+ }
+ if len(parser.head_comment) > 0 {
+ parser.head_comment = append(parser.head_comment, '\n')
+ }
+ parser.head_comment = append(parser.head_comment, comment.head...)
+ }
+ if len(comment.foot) > 0 {
+ if len(parser.foot_comment) > 0 {
+ parser.foot_comment = append(parser.foot_comment, '\n')
+ }
+ parser.foot_comment = append(parser.foot_comment, comment.foot...)
+ }
+ if len(comment.line) > 0 {
+ if len(parser.line_comment) > 0 {
+ parser.line_comment = append(parser.line_comment, '\n')
+ }
+ parser.line_comment = append(parser.line_comment, comment.line...)
+ }
+ *comment = yaml_comment_t{}
+ parser.comments_head++
+ }
+}
+
+// Remove the next token from the queue (must be called after peek_token).
+func skip_token(parser *yaml_parser_t) {
+ parser.token_available = false
+ parser.tokens_parsed++
+ parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
+ parser.tokens_head++
+}
+
+// Get the next event.
+func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
+ // Erase the event object.
+ *event = yaml_event_t{}
+
+ // No events after the end of the stream or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
+ return true
+ }
+
+ // Generate the next event.
+ return yaml_parser_state_machine(parser, event)
+}
+
+// Set parser error.
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+// State dispatcher.
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
+ //trace("yaml_parser_state_machine", "state:", parser.state.String())
+
+ switch parser.state {
+ case yaml_PARSE_STREAM_START_STATE:
+ return yaml_parser_parse_stream_start(parser, event)
+
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, true)
+
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, false)
+
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return yaml_parser_parse_document_content(parser, event)
+
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return yaml_parser_parse_document_end(parser, event)
+
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, true, false)
+
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return yaml_parser_parse_node(parser, event, true, true)
+
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, false, false)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_indentless_sequence_entry(parser, event)
+
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, true)
+
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, false)
+
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_block_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
+
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, true)
+
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+ default:
+ panic("invalid parser state")
+ }
+}
+
+// Parse the production:
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+//
+// ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_STREAM_START_TOKEN {
+ return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+ }
+ parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ encoding: token.encoding,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+//
+// *
+//
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+// *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ // Parse extra document end indicators.
+ if !implicit {
+ for token.typ == yaml_DOCUMENT_END_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+ token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+ token.typ != yaml_DOCUMENT_START_TOKEN &&
+ token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an implicit document.
+ if !yaml_parser_process_directives(parser, nil, nil) {
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+ var head_comment []byte
+ if len(parser.head_comment) > 0 {
+ // [Go] Scan the header comment backwards, and if an empty line is found, break
+ // the header so the part before the last empty line goes into the
+ // document header, while the bottom of it goes into a follow up event.
+ for i := len(parser.head_comment) - 1; i > 0; i-- {
+ if parser.head_comment[i] == '\n' {
+ if i == len(parser.head_comment)-1 {
+ head_comment = parser.head_comment[:i]
+ parser.head_comment = parser.head_comment[i+1:]
+ break
+ } else if parser.head_comment[i-1] == '\n' {
+ head_comment = parser.head_comment[:i-1]
+ parser.head_comment = parser.head_comment[i+1:]
+ break
+ }
+ }
+ }
+ }
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+
+ head_comment: head_comment,
+ }
+
+ } else if token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an explicit document.
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+ start_mark := token.start_mark
+ if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+ return false
+ }
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_DOCUMENT_START_TOKEN {
+ yaml_parser_set_parser_error(parser,
+ "did not find expected ", token.start_mark)
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+ end_mark := token.end_mark
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: false,
+ }
+ skip_token(parser)
+
+ } else {
+ // Parse the stream end.
+ parser.state = yaml_PARSE_END_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ }
+
+ return true
+}
+
+// Parse the productions:
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+// ***********
+func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
+ token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
+ token.typ == yaml_DOCUMENT_START_TOKEN ||
+ token.typ == yaml_DOCUMENT_END_TOKEN ||
+ token.typ == yaml_STREAM_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ return yaml_parser_process_empty_scalar(parser, event,
+ token.start_mark)
+ }
+ return yaml_parser_parse_node(parser, event, true, false)
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+//
+// *************
+//
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ implicit := true
+ if token.typ == yaml_DOCUMENT_END_TOKEN {
+ end_mark = token.end_mark
+ skip_token(parser)
+ implicit = false
+ }
+
+ parser.tag_directives = parser.tag_directives[:0]
+
+ parser.state = yaml_PARSE_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ implicit: implicit,
+ }
+ yaml_parser_set_event_comments(parser, event)
+ if len(event.head_comment) > 0 && len(event.foot_comment) == 0 {
+ event.foot_comment = event.head_comment
+ event.head_comment = nil
+ }
+ return true
+}
+
+func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) {
+ event.head_comment = parser.head_comment
+ event.line_comment = parser.line_comment
+ event.foot_comment = parser.foot_comment
+ parser.head_comment = nil
+ parser.line_comment = nil
+ parser.foot_comment = nil
+ parser.tail_comment = nil
+ parser.stem_comment = nil
+}
+
+// Parse the productions:
+// block_node_or_indentless_sequence ::=
+//
+// ALIAS
+// *****
+// | properties (block_content | indentless_block_sequence)?
+// ********** *
+// | block_content | indentless_block_sequence
+// *
+//
+// block_node ::= ALIAS
+//
+// *****
+// | properties block_content?
+// ********** *
+// | block_content
+// *
+//
+// flow_node ::= ALIAS
+//
+// *****
+// | properties flow_content?
+// ********** *
+// | flow_content
+// *
+//
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+//
+// *************************
+//
+// block_content ::= block_collection | flow_collection | SCALAR
+//
+// ******
+//
+// flow_content ::= flow_collection | SCALAR
+//
+// ******
+func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
+ //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_ALIAS_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ *event = yaml_event_t{
+ typ: yaml_ALIAS_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ anchor: token.value,
+ }
+ yaml_parser_set_event_comments(parser, event)
+ skip_token(parser)
+ return true
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ var tag_token bool
+ var tag_handle, tag_suffix, anchor []byte
+ var tag_mark yaml_mark_t
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ } else if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ start_mark = token.start_mark
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ var tag []byte
+ if tag_token {
+ if len(tag_handle) == 0 {
+ tag = tag_suffix
+ tag_suffix = nil
+ } else {
+ for i := range parser.tag_directives {
+ if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
+ tag = append([]byte(nil), parser.tag_directives[i].prefix...)
+ tag = append(tag, tag_suffix...)
+ break
+ }
+ }
+ if len(tag) == 0 {
+ yaml_parser_set_parser_error_context(parser,
+ "while parsing a node", start_mark,
+ "found undefined tag handle", tag_mark)
+ return false
+ }
+ }
+ }
+
+ implicit := len(tag) == 0
+ if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_SCALAR_TOKEN {
+ var plain_implicit, quoted_implicit bool
+ end_mark = token.end_mark
+ if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
+ plain_implicit = true
+ } else if len(tag) == 0 {
+ quoted_implicit = true
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ value: token.value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(token.style),
+ }
+ yaml_parser_set_event_comments(parser, event)
+ skip_token(parser)
+ return true
+ }
+ if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
+ // [Go] Some of the events below can be merged as they differ only on style.
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
+ }
+ yaml_parser_set_event_comments(parser, event)
+ return true
+ }
+ if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ yaml_parser_set_event_comments(parser, event)
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ if parser.stem_comment != nil {
+ event.head_comment = parser.stem_comment
+ parser.stem_comment = nil
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
+ }
+ if parser.stem_comment != nil {
+ event.head_comment = parser.stem_comment
+ parser.stem_comment = nil
+ }
+ return true
+ }
+ if len(anchor) > 0 || len(tag) > 0 {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ quoted_implicit: false,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+ }
+
+ context := "while parsing a flow node"
+ if block {
+ context = "while parsing a block node"
+ }
+ yaml_parser_set_parser_error_context(parser, context, start_mark,
+ "did not find expected node content", token.start_mark)
+ return false
+}
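+
+// For example, parsing the node `&a !!str hello` takes the ANCHOR branch, then the
+// TAG branch (the "!!" handle resolves through the default tag directives to
+// "tag:yaml.org,2002:str"), and finally the SCALAR branch, producing a single
+// SCALAR event with anchor "a", that tag, value "hello", and both implicit flags false.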
+
+// Parse the productions:
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+//
+// ******************** *********** * *********
+func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ prior_head_len := len(parser.head_comment)
+ skip_token(parser)
+ yaml_parser_split_stem_comment(parser, prior_head_len)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ }
+ if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block collection", context_mark,
+ "did not find expected '-' indicator", token.start_mark)
+}
+
+// Parse the productions:
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+//
+// *********** *
+func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ prior_head_len := len(parser.head_comment)
+ skip_token(parser)
+ yaml_parser_split_stem_comment(parser, prior_head_len)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
+ token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ }
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark?
+ }
+ return true
+}
+
+// Split stem comment from head comment.
+//
+// When a sequence or map is found under a sequence entry, the former head comment
+// is assigned to the underlying sequence or map as a whole, not to the individual
+// sequence or map entry as would otherwise be expected. To handle this case, the
+// previous head comment is moved aside as the stem comment.
+func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) {
+ if stem_len == 0 {
+ return
+ }
+
+ token := peek_token(parser)
+ if token == nil || token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN {
+ return
+ }
+
+ parser.stem_comment = parser.head_comment[:stem_len]
+ if len(parser.head_comment) == stem_len {
+ parser.head_comment = nil
+ } else {
+ // Copy suffix to prevent very strange bugs if someone ever appends
+ // further bytes to the prefix in the stem_comment slice above.
+ parser.head_comment = append([]byte(nil), parser.head_comment[stem_len+1:]...)
+ }
+}
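+
+// For example, given
+//
+//	# nested
+//	- - a
+//	  - b
+//
+// the "# nested" head comment is moved aside as the stem comment, so that it ends up
+// attached to the inner sequence as a whole (its SEQUENCE-START event) rather than
+// to its first entry.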
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+//
+// *******************
+// ((KEY block_node_or_indentless_sequence?)?
+// *** *
+// (VALUE block_node_or_indentless_sequence?)?)*
+//
+// BLOCK-END
+// *********
+func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ // [Go] A tail comment was left over from the prior mapping value processed. Emit it
+ // as its own event, since it must be associated with that value and not with the following key.
+ if len(parser.tail_comment) > 0 {
+ *event = yaml_event_t{
+ typ: yaml_TAIL_COMMENT_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ foot_comment: parser.tail_comment,
+ }
+ parser.tail_comment = nil
+ return true
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ } else if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ yaml_parser_set_event_comments(parser, event)
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block mapping", context_mark,
+ "did not find expected key", token.start_mark)
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+//
+// ((KEY block_node_or_indentless_sequence?)?
+//
+// (VALUE block_node_or_indentless_sequence?)?)*
+// ***** *
+// BLOCK-END
+func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence ::= FLOW-SEQUENCE-START
+//
+// *******************
+// (flow_sequence_entry FLOW-ENTRY)*
+// * **********
+// flow_sequence_entry?
+// *
+// FLOW-SEQUENCE-END
+// *****************
+//
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//
+// *
+func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow sequence", context_mark,
+ "did not find expected ',' or ']'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ implicit: true,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ skip_token(parser)
+ return true
+ } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ yaml_parser_set_event_comments(parser, event)
+
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//
+// *** *
+func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ mark := token.end_mark
+ skip_token(parser)
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//
+// ***** *
+func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//
+// *
+func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be end_mark?
+ }
+ return true
+}
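+
+// For example, the flow sequence `[a: b, c]` produces SEQUENCE-START, an implicit
+// flow MAPPING-START, SCALAR "a", SCALAR "b", MAPPING-END, SCALAR "c", and
+// SEQUENCE-END, with the single-pair mapping handled by the entry-mapping
+// key/value/end states above.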
+
+// Parse the productions:
+// flow_mapping ::= FLOW-MAPPING-START
+//
+// ******************
+// (flow_mapping_entry FLOW-ENTRY)*
+// * **********
+// flow_mapping_entry?
+// ******************
+// FLOW-MAPPING-END
+// ****************
+//
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// - *** *
+func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow mapping", context_mark,
+ "did not find expected ',' or '}'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ } else {
+ parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ yaml_parser_set_event_comments(parser, event)
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// - ***** *
+func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if empty {
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Generate an empty scalar event.
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: mark,
+ end_mark: mark,
+ value: nil, // Empty
+ implicit: true,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+}
+
+var default_tag_directives = []yaml_tag_directive_t{
+ {[]byte("!"), []byte("!")},
+ {[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
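+
+// For example, with these defaults a tag token with handle "!!" and suffix "str"
+// resolves to "tag:yaml.org,2002:str", while handle "!" and suffix "local"
+// resolves to "!local".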
+
+// Parse directives.
+func yaml_parser_process_directives(parser *yaml_parser_t,
+ version_directive_ref **yaml_version_directive_t,
+ tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
+ if version_directive != nil {
+ yaml_parser_set_parser_error(parser,
+ "found duplicate %YAML directive", token.start_mark)
+ return false
+ }
+ if token.major != 1 || token.minor != 1 {
+ yaml_parser_set_parser_error(parser,
+ "found incompatible YAML document", token.start_mark)
+ return false
+ }
+ version_directive = &yaml_version_directive_t{
+ major: token.major,
+ minor: token.minor,
+ }
+ } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ value := yaml_tag_directive_t{
+ handle: token.value,
+ prefix: token.prefix,
+ }
+ if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
+ return false
+ }
+ tag_directives = append(tag_directives, value)
+ }
+
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+
+ for i := range default_tag_directives {
+ if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+ return false
+ }
+ }
+
+ if version_directive_ref != nil {
+ *version_directive_ref = version_directive
+ }
+ if tag_directives_ref != nil {
+ *tag_directives_ref = tag_directives
+ }
+ return true
+}
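+
+// Note that only version 1.1 is accepted above: a "%YAML 1.2" directive (or any
+// other version) is rejected with "found incompatible YAML document", and a second
+// %YAML directive in the same document is reported as a duplicate.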
+
+// Append a tag directive to the directives stack.
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
+ for i := range parser.tag_directives {
+ if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+ }
+ }
+
+ // [Go] I suspect the copy is unnecessary. This was likely done
+ // because there was no way to track ownership of the data.
+ value_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(value_copy.handle, value.handle)
+ copy(value_copy.prefix, value.prefix)
+ parser.tag_directives = append(parser.tag_directives, value_copy)
+ return true
+}
diff --git a/vendor/go.yaml.in/yaml/v3/readerc.go b/vendor/go.yaml.in/yaml/v3/readerc.go
new file mode 100644
index 000000000..56af24536
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/readerc.go
@@ -0,0 +1,434 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "io"
+)
+
+// Set the reader error and return 0.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+ parser.error = yaml_READER_ERROR
+ parser.problem = problem
+ parser.problem_offset = offset
+ parser.problem_value = value
+ return false
+}
+
+// Byte order marks.
+const (
+ bom_UTF8 = "\xef\xbb\xbf"
+ bom_UTF16LE = "\xff\xfe"
+ bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+ // Ensure that we had enough bytes in the raw buffer.
+ for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+ if !yaml_parser_update_raw_buffer(parser) {
+ return false
+ }
+ }
+
+ // Determine the encoding.
+ buf := parser.raw_buffer
+ pos := parser.raw_buffer_pos
+ avail := len(buf) - pos
+ if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+ parser.encoding = yaml_UTF16LE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+ parser.encoding = yaml_UTF16BE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+ parser.encoding = yaml_UTF8_ENCODING
+ parser.raw_buffer_pos += 3
+ parser.offset += 3
+ } else {
+ parser.encoding = yaml_UTF8_ENCODING
+ }
+ return true
+}
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+ size_read := 0
+
+ // Return if the raw buffer is full.
+ if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+ return true
+ }
+
+ // Return on EOF.
+ if parser.eof {
+ return true
+ }
+
+ // Move the remaining bytes in the raw buffer to the beginning.
+ if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+ copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+ }
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+ parser.raw_buffer_pos = 0
+
+ // Call the read handler to fill the buffer.
+ size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+ if err == io.EOF {
+ parser.eof = true
+ } else if err != nil {
+ return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+ }
+ return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+ if parser.read_handler == nil {
+ panic("read handler must be set")
+ }
+
+ // [Go] This function was changed to guarantee the requested length size at EOF.
+ // The fact we need to do this is pretty awful, but the description above implies
+ // that this should be the case, and there are tests that rely on it.
+
+ // If the EOF flag is set and the raw buffer is empty, do nothing.
+ if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+ // [Go] ACTUALLY! Read the documentation of this function above.
+ // This is just broken. To return true, we need to have the
+ // given length in the buffer. Not doing that means every single
+ // check that calls this function to make sure the buffer has a
+ // given length is either panicking (in Go) or accessing invalid memory (as in C).
+ //return true
+ }
+
+ // Return if the buffer contains enough characters.
+ if parser.unread >= length {
+ return true
+ }
+
+ // Determine the input encoding if it is not known yet.
+ if parser.encoding == yaml_ANY_ENCODING {
+ if !yaml_parser_determine_encoding(parser) {
+ return false
+ }
+ }
+
+ // Move the unread characters to the beginning of the buffer.
+ buffer_len := len(parser.buffer)
+ if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+ copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+ buffer_len -= parser.buffer_pos
+ parser.buffer_pos = 0
+ } else if parser.buffer_pos == buffer_len {
+ buffer_len = 0
+ parser.buffer_pos = 0
+ }
+
+ // Open the whole buffer for writing, and cut it before returning.
+ parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+ // Fill the buffer until it has enough characters.
+ first := true
+ for parser.unread < length {
+
+ // Fill the raw buffer if necessary.
+ if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+ if !yaml_parser_update_raw_buffer(parser) {
+ parser.buffer = parser.buffer[:buffer_len]
+ return false
+ }
+ }
+ first = false
+
+ // Decode the raw buffer.
+ inner:
+ for parser.raw_buffer_pos != len(parser.raw_buffer) {
+ var value rune
+ var width int
+
+ raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+ // Decode the next character.
+ switch parser.encoding {
+ case yaml_UTF8_ENCODING:
+ // Decode a UTF-8 character. Check RFC 3629
+ // (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+ //
+ // The following table (taken from the RFC) is used for
+ // decoding.
+ //
+ // Char. number range | UTF-8 octet sequence
+ // (hexadecimal) | (binary)
+ // --------------------+------------------------------------
+ // 0000 0000-0000 007F | 0xxxxxxx
+ // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+ // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+ // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ //
+ // Additionally, the characters in the range 0xD800-0xDFFF
+ // are prohibited as they are reserved for use with UTF-16
+ // surrogate pairs.
+
+ // Determine the length of the UTF-8 sequence.
+ octet := parser.raw_buffer[parser.raw_buffer_pos]
+ switch {
+ case octet&0x80 == 0x00:
+ width = 1
+ case octet&0xE0 == 0xC0:
+ width = 2
+ case octet&0xF0 == 0xE0:
+ width = 3
+ case octet&0xF8 == 0xF0:
+ width = 4
+ default:
+ // The leading octet is invalid.
+ return yaml_parser_set_reader_error(parser,
+ "invalid leading UTF-8 octet",
+ parser.offset, int(octet))
+ }
+
+ // Check if the raw buffer contains an incomplete character.
+ if width > raw_unread {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-8 octet sequence",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Decode the leading octet.
+ switch {
+ case octet&0x80 == 0x00:
+ value = rune(octet & 0x7F)
+ case octet&0xE0 == 0xC0:
+ value = rune(octet & 0x1F)
+ case octet&0xF0 == 0xE0:
+ value = rune(octet & 0x0F)
+ case octet&0xF8 == 0xF0:
+ value = rune(octet & 0x07)
+ default:
+ value = 0
+ }
+
+ // Check and decode the trailing octets.
+ for k := 1; k < width; k++ {
+ octet = parser.raw_buffer[parser.raw_buffer_pos+k]
+
+ // Check if the octet is valid.
+ if (octet & 0xC0) != 0x80 {
+ return yaml_parser_set_reader_error(parser,
+ "invalid trailing UTF-8 octet",
+ parser.offset+k, int(octet))
+ }
+
+ // Decode the octet.
+ value = (value << 6) + rune(octet&0x3F)
+ }
+
+ // Check the length of the sequence against the value.
+ switch {
+ case width == 1:
+ case width == 2 && value >= 0x80:
+ case width == 3 && value >= 0x800:
+ case width == 4 && value >= 0x10000:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "invalid length of a UTF-8 sequence",
+ parser.offset, -1)
+ }
+
+ // Check the range of the value.
+ if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
+ return yaml_parser_set_reader_error(parser,
+ "invalid Unicode character",
+ parser.offset, int(value))
+ }
+
+ case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
+ var low, high int
+ if parser.encoding == yaml_UTF16LE_ENCODING {
+ low, high = 0, 1
+ } else {
+ low, high = 1, 0
+ }
+
+ // The UTF-16 encoding is not as simple as one might
+ // naively think. Check RFC 2781
+ // (http://www.ietf.org/rfc/rfc2781.txt).
+ //
+ // Normally, two subsequent bytes describe a Unicode
+ // character. However a special technique (called a
+ // surrogate pair) is used for specifying character
+ // values larger than 0xFFFF.
+ //
+ // A surrogate pair consists of two pseudo-characters:
+ // high surrogate area (0xD800-0xDBFF)
+ // low surrogate area (0xDC00-0xDFFF)
+ //
+ // The following formulas are used for decoding
+ // and encoding characters using surrogate pairs:
+ //
+ // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
+ // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
+ // W1 = 110110yyyyyyyyyy
+ // W2 = 110111xxxxxxxxxx
+ //
+ // where U is the character value, W1 is the high surrogate
+ // area, W2 is the low surrogate area.
+
+ // Check for incomplete UTF-16 character.
+ if raw_unread < 2 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 character",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the character.
+ value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
+
+ // Check for unexpected low surrogate area.
+ if value&0xFC00 == 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "unexpected low surrogate area",
+ parser.offset, int(value))
+ }
+
+ // Check for a high surrogate area.
+ if value&0xFC00 == 0xD800 {
+ width = 4
+
+ // Check for incomplete surrogate pair.
+ if raw_unread < 4 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 surrogate pair",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the next character.
+ value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
+
+ // Check for a low surrogate area.
+ if value2&0xFC00 != 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "expected low surrogate area",
+ parser.offset+2, int(value2))
+ }
+
+ // Generate the value of the surrogate pair.
+ value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+ } else {
+ width = 2
+ }
+
+ default:
+ panic("impossible")
+ }
+
+ // Check if the character is in the allowed range:
+ // #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
+ // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
+ // | [#x10000-#x10FFFF] (32 bit)
+ switch {
+ case value == 0x09:
+ case value == 0x0A:
+ case value == 0x0D:
+ case value >= 0x20 && value <= 0x7E:
+ case value == 0x85:
+ case value >= 0xA0 && value <= 0xD7FF:
+ case value >= 0xE000 && value <= 0xFFFD:
+ case value >= 0x10000 && value <= 0x10FFFF:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "control characters are not allowed",
+ parser.offset, int(value))
+ }
+
+ // Move the raw pointers.
+ parser.raw_buffer_pos += width
+ parser.offset += width
+
+ // Finally put the character into the buffer.
+ if value <= 0x7F {
+ // 0000 0000-0000 007F . 0xxxxxxx
+ parser.buffer[buffer_len+0] = byte(value)
+ buffer_len += 1
+ } else if value <= 0x7FF {
+ // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+ parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+ buffer_len += 2
+ } else if value <= 0xFFFF {
+ // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+ buffer_len += 3
+ } else {
+ // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+ buffer_len += 4
+ }
+
+ parser.unread++
+ }
+
+ // On EOF, put NUL into the buffer and return.
+ if parser.eof {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ parser.unread++
+ break
+ }
+ }
+ // [Go] Read the documentation of this function above. To return true,
+ // we need to have the given length in the buffer. Not doing that means
+ // every single check that calls this function to make sure the buffer
+ // has a given length is either panicking (in Go) or accessing invalid memory (as in C).
+ // This happens here due to the EOF above breaking early.
+ for buffer_len < length {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ }
+ parser.buffer = parser.buffer[:buffer_len]
+ return true
+}
diff --git a/vendor/go.yaml.in/yaml/v3/resolve.go b/vendor/go.yaml.in/yaml/v3/resolve.go
new file mode 100644
index 000000000..64ae88805
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/resolve.go
@@ -0,0 +1,326 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "encoding/base64"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type resolveMapItem struct {
+ value interface{}
+ tag string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+ t := resolveTable
+ t[int('+')] = 'S' // Sign
+ t[int('-')] = 'S'
+ for _, c := range "0123456789" {
+ t[int(c)] = 'D' // Digit
+ }
+ for _, c := range "yYnNtTfFoO~" {
+ t[int(c)] = 'M' // In map
+ }
+ t[int('.')] = '.' // Float (potentially in map)
+
+ var resolveMapList = []struct {
+ v interface{}
+ tag string
+ l []string
+ }{
+ {true, boolTag, []string{"true", "True", "TRUE"}},
+ {false, boolTag, []string{"false", "False", "FALSE"}},
+ {nil, nullTag, []string{"", "~", "null", "Null", "NULL"}},
+ {math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}},
+ {math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}},
+ {math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}},
+ {math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}},
+ {"<<", mergeTag, []string{"<<"}},
+ }
+
+ m := resolveMap
+ for _, item := range resolveMapList {
+ for _, s := range item.l {
+ m[s] = resolveMapItem{item.v, item.tag}
+ }
+ }
+}
+
+const (
+ nullTag = "!!null"
+ boolTag = "!!bool"
+ strTag = "!!str"
+ intTag = "!!int"
+ floatTag = "!!float"
+ timestampTag = "!!timestamp"
+ seqTag = "!!seq"
+ mapTag = "!!map"
+ binaryTag = "!!binary"
+ mergeTag = "!!merge"
+)
+
+var longTags = make(map[string]string)
+var shortTags = make(map[string]string)
+
+func init() {
+ for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} {
+ ltag := longTag(stag)
+ longTags[stag] = ltag
+ shortTags[ltag] = stag
+ }
+}
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+ if strings.HasPrefix(tag, longTagPrefix) {
+ if stag, ok := shortTags[tag]; ok {
+ return stag
+ }
+ return "!!" + tag[len(longTagPrefix):]
+ }
+ return tag
+}
+
+func longTag(tag string) string {
+ if strings.HasPrefix(tag, "!!") {
+ if ltag, ok := longTags[tag]; ok {
+ return ltag
+ }
+ return longTagPrefix + tag[2:]
+ }
+ return tag
+}
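+
+// For example:
+//
+//	shortTag("tag:yaml.org,2002:str") // "!!str"
+//	longTag("!!str")                  // "tag:yaml.org,2002:str"
+//	shortTag("!local")                // "!local" (returned unchanged)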
+
+func resolvableTag(tag string) bool {
+ switch tag {
+ case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag:
+ return true
+ }
+ return false
+}
+
+var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
+
+func resolve(tag string, in string) (rtag string, out interface{}) {
+ tag = shortTag(tag)
+ if !resolvableTag(tag) {
+ return tag, in
+ }
+
+ defer func() {
+ switch tag {
+ case "", rtag, strTag, binaryTag:
+ return
+ case floatTag:
+ if rtag == intTag {
+ switch v := out.(type) {
+ case int64:
+ rtag = floatTag
+ out = float64(v)
+ return
+ case int:
+ rtag = floatTag
+ out = float64(v)
+ return
+ }
+ }
+ }
+ failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+ }()
+
+ // Any data is accepted as a !!str or !!binary.
+ // Otherwise, the prefix is enough of a hint about what it might be.
+ hint := byte('N')
+ if in != "" {
+ hint = resolveTable[in[0]]
+ }
+ if hint != 0 && tag != strTag && tag != binaryTag {
+ // Handle things we can lookup in a map.
+ if item, ok := resolveMap[in]; ok {
+ return item.tag, item.value
+ }
+
+ // Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+ // are purposefully unsupported here. They're still quoted on
+ // the way out for compatibility with other parsers, though.
+
+ switch hint {
+ case 'M':
+ // We've already checked the map above.
+
+ case '.':
+ // Not in the map, so maybe a normal float.
+ floatv, err := strconv.ParseFloat(in, 64)
+ if err == nil {
+ return floatTag, floatv
+ }
+
+ case 'D', 'S':
+ // Int, float, or timestamp.
+ // Only try values as a timestamp if the value is unquoted or there's an explicit
+ // !!timestamp tag.
+ if tag == "" || tag == timestampTag {
+ t, ok := parseTimestamp(in)
+ if ok {
+ return timestampTag, t
+ }
+ }
+
+ plain := strings.Replace(in, "_", "", -1)
+ intv, err := strconv.ParseInt(plain, 0, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain, 0, 64)
+ if err == nil {
+ return intTag, uintv
+ }
+ if yamlStyleFloat.MatchString(plain) {
+ floatv, err := strconv.ParseFloat(plain, 64)
+ if err == nil {
+ return floatTag, floatv
+ }
+ }
+ if strings.HasPrefix(plain, "0b") {
+ intv, err := strconv.ParseInt(plain[2:], 2, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+ if err == nil {
+ return intTag, uintv
+ }
+ } else if strings.HasPrefix(plain, "-0b") {
+ intv, err := strconv.ParseInt("-"+plain[3:], 2, 64)
+ if err == nil {
+ if true || intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ }
+ // Octals as introduced in version 1.2 of the spec.
+ // Octals from the 1.1 spec, spelled as 0777, are still
+ // decoded by default in v3 as well for compatibility.
+ // May be dropped in v4 depending on how usage evolves.
+ if strings.HasPrefix(plain, "0o") {
+ intv, err := strconv.ParseInt(plain[2:], 8, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain[2:], 8, 64)
+ if err == nil {
+ return intTag, uintv
+ }
+ } else if strings.HasPrefix(plain, "-0o") {
+ intv, err := strconv.ParseInt("-"+plain[3:], 8, 64)
+ if err == nil {
+ if true || intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ }
+ default:
+ panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")")
+ }
+ }
+ return strTag, in
+}
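+
+// For example, with an empty tag hint:
+//
+//	resolve("", "true")  // "!!bool", true   (found in resolveMap)
+//	resolve("", "yes")   // "!!str", "yes"   (yes/no are not booleans here)
+//	resolve("", "0o10")  // "!!int", 8       (YAML 1.2 octal)
+//	resolve("", "10:20") // "!!str", "10:20" (base-60 values are not resolved)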
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+ const lineLen = 70
+ encLen := base64.StdEncoding.EncodedLen(len(s))
+ lines := encLen/lineLen + 1
+ buf := make([]byte, encLen*2+lines)
+ in := buf[0:encLen]
+ out := buf[encLen:]
+ base64.StdEncoding.Encode(in, []byte(s))
+ k := 0
+ for i := 0; i < len(in); i += lineLen {
+ j := i + lineLen
+ if j > len(in) {
+ j = len(in)
+ }
+ k += copy(out[k:], in[i:j])
+ if lines > 1 {
+ out[k] = '\n'
+ k++
+ }
+ }
+ return string(out[:k])
+}
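+
+// For example, encodeBase64("hello") returns the single line "aGVsbG8=", while
+// inputs whose encoding is longer than 70 characters are split into 70-character
+// lines, each followed by a '\n' (including the last one).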
+
+// This is a subset of the formats allowed by the regular expression
+// defined at http://yaml.org/type/timestamp.html.
+var allowedTimestampFormats = []string{
+ "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields.
+ "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
+ "2006-1-2 15:4:5.999999999", // space separated with no time zone
+ "2006-1-2", // date only
+ // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
+ // from the set of examples.
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+ // TODO write code to check all the formats supported by
+ // http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+ // Quick check: all date formats start with YYYY-.
+ i := 0
+ for ; i < len(s); i++ {
+ if c := s[i]; c < '0' || c > '9' {
+ break
+ }
+ }
+ if i != 4 || i == len(s) || s[i] != '-' {
+ return time.Time{}, false
+ }
+ for _, format := range allowedTimestampFormats {
+ if t, err := time.Parse(format, s); err == nil {
+ return t, true
+ }
+ }
+ return time.Time{}, false
+}
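+
+// For example, parseTimestamp("2001-12-14") and
+// parseTimestamp("2001-12-14t21:59:43.10-05:00") both succeed, while
+// parseTimestamp("14-12-2001") fails the leading "YYYY-" check.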
diff --git a/vendor/go.yaml.in/yaml/v3/scannerc.go b/vendor/go.yaml.in/yaml/v3/scannerc.go
new file mode 100644
index 000000000..30b1f0892
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/scannerc.go
@@ -0,0 +1,3040 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive-descent parser (or
+// LL(1) parser, as it is usually called).
+//
+// Actually, there are only two aspects of Scanning that might be called "clever";
+// the rest is quite straightforward. These are "block collection start" and
+// "simple keys". Both issues are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+// STREAM-START(encoding) # The stream start.
+// STREAM-END # The stream end.
+// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
+// DOCUMENT-START # '---'
+// DOCUMENT-END # '...'
+// BLOCK-SEQUENCE-START # Indentation increase denoting a block
+// BLOCK-MAPPING-START # sequence or a block mapping.
+// BLOCK-END # Indentation decrease.
+// FLOW-SEQUENCE-START # '['
+// FLOW-SEQUENCE-END # ']'
+// FLOW-MAPPING-START # '{'
+// FLOW-MAPPING-END # '}'
+// BLOCK-ENTRY # '-'
+// FLOW-ENTRY # ','
+// KEY # '?' or nothing (simple keys).
+// VALUE # ':'
+// ALIAS(anchor) # '*anchor'
+// ANCHOR(anchor) # '&anchor'
+// TAG(handle,suffix) # '!handle!suffix'
+// SCALAR(value,style) # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+// STREAM-START(encoding)
+// STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+// VERSION-DIRECTIVE(major,minor)
+// TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+// %YAML 1.1
+// %TAG ! !foo
+// %TAG !yaml! tag:yaml.org,2002:
+// ---
+//
+// The corresponding sequence of tokens:
+//
+// STREAM-START(utf-8)
+// VERSION-DIRECTIVE(1,1)
+// TAG-DIRECTIVE("!","!foo")
+// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+// DOCUMENT-START
+// STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+// DOCUMENT-START
+// DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+// 'a scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// STREAM-END
+//
+// 2. An explicit document:
+//
+// ---
+// 'a scalar'
+// ...
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-END
+// STREAM-END
+//
+// 3. Several documents in a stream:
+//
+// 'a scalar'
+// ---
+// 'another scalar'
+// ---
+// 'yet another scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("another scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("yet another scalar",single-quoted)
+// STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tag, and scalars:
+//
+// ALIAS(anchor)
+// ANCHOR(anchor)
+// TAG(handle,suffix)
+// SCALAR(value,style)
+//
+// The following series of examples illustrate the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+// &A [ *A ]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// ANCHOR("A")
+// FLOW-SEQUENCE-START
+// ALIAS("A")
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A tagged scalar:
+//
+// !!float "3.14" # A good approximation.
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// TAG("!!","float")
+// SCALAR("3.14",double-quoted)
+// STREAM-END
+//
+// 3. Various scalar styles:
+//
+// --- # Implicit empty plain scalars do not produce tokens.
+// --- a plain scalar
+// --- 'a single-quoted scalar'
+// --- "a double-quoted scalar"
+// --- |-
+// a literal scalar
+// --- >-
+// a folded
+// scalar
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// DOCUMENT-START
+// SCALAR("a plain scalar",plain)
+// DOCUMENT-START
+// SCALAR("a single-quoted scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("a double-quoted scalar",double-quoted)
+// DOCUMENT-START
+// SCALAR("a literal scalar",literal)
+// DOCUMENT-START
+// SCALAR("a folded scalar",folded)
+// STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+// FLOW-SEQUENCE-START
+// FLOW-SEQUENCE-END
+// FLOW-MAPPING-START
+// FLOW-MAPPING-END
+// FLOW-ENTRY
+// KEY
+// VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// respectively. FLOW-ENTRY represents the ',' indicator. Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+// [item 1, item 2, item 3]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-SEQUENCE-START
+// SCALAR("item 1",plain)
+// FLOW-ENTRY
+// SCALAR("item 2",plain)
+// FLOW-ENTRY
+// SCALAR("item 3",plain)
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A flow mapping:
+//
+// {
+// a simple key: a value, # Note that the KEY token is produced.
+// ? a complex key: another value,
+// }
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// FLOW-ENTRY
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// FLOW-ENTRY
+// FLOW-MAPPING-END
+// STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+// BLOCK-SEQUENCE-START
+// BLOCK-MAPPING-START
+// BLOCK-END
+// BLOCK-ENTRY
+// KEY
+// VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote the indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes the indentation decrease that ends a block collection
+// (cf. the DEDENT token in Python). However, YAML has some syntax peculiarities
+// that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' correspondingly.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+//
+// - item 1
+// - item 2
+// -
+// - item 3.1
+// - item 3.2
+// -
+// key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 3.1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 3.2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Block mappings:
+//
+// a simple key: a value # The KEY token is produced here.
+// ? a complex key
+// : another value
+// a mapping:
+// key 1: value 1
+// key 2: value 2
+// a sequence:
+// - item 1
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// KEY
+// SCALAR("a mapping",plain)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start on the current line. The following examples
+// illustrate this case:
+//
+// 1. Collections in a sequence:
+//
+// - - item 1
+// - item 2
+// - key 1: value 1
+// key 2: value 2
+// - ? complex key
+// : complex value
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("complex key")
+// VALUE
+// SCALAR("complex value")
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Collections in a mapping:
+//
+// ? a sequence
+// : - item 1
+// - item 2
+// ? a mapping
+// : key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a mapping",plain)
+// VALUE
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML also permits non-indented sequences if they are included in a block
+// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+// key:
+// - item 1 # BLOCK-SEQUENCE-START is NOT produced here.
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key",plain)
+// VALUE
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+//
+
+// Ensure that the buffer contains the required number of characters.
+// Return true on success, false on failure (reader error or memory error).
+func cache(parser *yaml_parser_t, length int) bool {
+ // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
+ return parser.unread >= length || yaml_parser_update_buffer(parser, length)
+}
+
+// Advance the buffer pointer.
+func skip(parser *yaml_parser_t) {
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ parser.newlines = 0
+ }
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+}
+
+func skip_line(parser *yaml_parser_t) {
+ if is_crlf(parser.buffer, parser.buffer_pos) {
+ parser.mark.index += 2
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread -= 2
+ parser.buffer_pos += 2
+ parser.newlines++
+ } else if is_break(parser.buffer, parser.buffer_pos) {
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+ parser.newlines++
+ }
+}
+
+// Copy a character to a string buffer and advance pointers.
+func read(parser *yaml_parser_t, s []byte) []byte {
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ parser.newlines = 0
+ }
+ w := width(parser.buffer[parser.buffer_pos])
+ if w == 0 {
+ panic("invalid character sequence")
+ }
+ if len(s) == 0 {
+ s = make([]byte, 0, 32)
+ }
+ if w == 1 && len(s)+w <= cap(s) {
+ s = s[:len(s)+1]
+ s[len(s)-1] = parser.buffer[parser.buffer_pos]
+ parser.buffer_pos++
+ } else {
+ s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
+ parser.buffer_pos += w
+ }
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ return s
+}
+
+// Copy a line break character to a string buffer and advance pointers.
+func read_line(parser *yaml_parser_t, s []byte) []byte {
+ buf := parser.buffer
+ pos := parser.buffer_pos
+ switch {
+ case buf[pos] == '\r' && buf[pos+1] == '\n':
+ // CR LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ parser.mark.index++
+ parser.unread--
+ case buf[pos] == '\r' || buf[pos] == '\n':
+ // CR|LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 1
+ case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
+ // NEL . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
+ // LS|PS . LS|PS
+ s = append(s, buf[parser.buffer_pos:pos+3]...)
+ parser.buffer_pos += 3
+ default:
+ return s
+ }
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ parser.newlines++
+ return s
+}
+
+// Get the next token.
+func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Erase the token object.
+ *token = yaml_token_t{} // [Go] Is this necessary?
+
+ // No tokens after STREAM-END or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
+ return true
+ }
+
+ // Ensure that the tokens queue contains enough tokens.
+ if !parser.token_available {
+ if !yaml_parser_fetch_more_tokens(parser) {
+ return false
+ }
+ }
+
+ // Fetch the next token from the queue.
+ *token = parser.tokens[parser.tokens_head]
+ parser.tokens_head++
+ parser.tokens_parsed++
+ parser.token_available = false
+
+ if token.typ == yaml_STREAM_END_TOKEN {
+ parser.stream_end_produced = true
+ }
+ return true
+}
+
+// Set the scanner error and return false.
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+ parser.error = yaml_SCANNER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = parser.mark
+ return false
+}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+ context := "while parsing a tag"
+ if directive {
+ context = "while parsing a %TAG directive"
+ }
+ return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
+}
+
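+// trace is a debugging helper. A typical use is `defer trace("scan", arg)()`:
+// it prints a "+++" line when called and the matching "---" line when the
+// returned function runs on exit.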
+func trace(args ...interface{}) func() {
+ pargs := append([]interface{}{"+++"}, args...)
+ fmt.Println(pargs...)
+ pargs = append([]interface{}{"---"}, args...)
+ return func() { fmt.Println(pargs...) }
+}
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+ // While we need more tokens to fetch, do it.
+ for {
+		// [Go] The comment parsing logic requires a lookahead of two tokens
+		// so that foot comments may be parsed in time to associate them with
+		// the tokens that are parsed before them, and also so that line
+		// comments may be transformed into head comments in some edge cases.
+ if parser.tokens_head < len(parser.tokens)-2 {
+ // If a potential simple key is at the head position, we need to fetch
+ // the next token to disambiguate it.
+ head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed]
+ if !ok {
+ break
+ } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok {
+ return false
+ } else if !valid {
+ break
+ }
+ }
+ // Fetch the next token.
+ if !yaml_parser_fetch_next_token(parser) {
+ return false
+ }
+ }
+
+ parser.token_available = true
+ return true
+}
+
+// The dispatcher for token fetchers.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) {
+ // Ensure that the buffer is initialized.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we just started scanning. Fetch STREAM-START then.
+ if !parser.stream_start_produced {
+ return yaml_parser_fetch_stream_start(parser)
+ }
+
+ scan_mark := parser.mark
+
+ // Eat whitespaces and comments until we reach the next token.
+ if !yaml_parser_scan_to_next_token(parser) {
+ return false
+ }
+
+ // [Go] While unrolling indents, transform the head comments of prior
+ // indentation levels observed after scan_start into foot comments at
+ // the respective indexes.
+
+ // Check the indentation level against the current column.
+ if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) {
+ return false
+ }
+
+ // Ensure that the buffer contains at least 4 characters. 4 is the length
+ // of the longest indicators ('--- ' and '... ').
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ // Is it the end of the stream?
+ if is_z(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_fetch_stream_end(parser)
+ }
+
+ // Is it a directive?
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+ return yaml_parser_fetch_directive(parser)
+ }
+
+ buf := parser.buffer
+ pos := parser.buffer_pos
+
+ // Is it the document start indicator?
+ if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+ }
+
+ // Is it the document end indicator?
+ if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
+ }
+
+ comment_mark := parser.mark
+ if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') {
+ // Associate any following comments with the prior token.
+ comment_mark = parser.tokens[len(parser.tokens)-1].start_mark
+ }
+ defer func() {
+ if !ok {
+ return
+ }
+ if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].typ == yaml_BLOCK_ENTRY_TOKEN {
+			// A sequence indicator alone has no line comment; it becomes
+			// a head comment for whatever follows.
+ return
+ }
+ if !yaml_parser_scan_line_comment(parser, comment_mark) {
+ ok = false
+ return
+ }
+ }()
+
+ // Is it the flow sequence start indicator?
+ if buf[pos] == '[' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
+ }
+
+ // Is it the flow mapping start indicator?
+ if parser.buffer[parser.buffer_pos] == '{' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
+ }
+
+ // Is it the flow sequence end indicator?
+ if parser.buffer[parser.buffer_pos] == ']' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_SEQUENCE_END_TOKEN)
+ }
+
+ // Is it the flow mapping end indicator?
+ if parser.buffer[parser.buffer_pos] == '}' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_MAPPING_END_TOKEN)
+ }
+
+ // Is it the flow entry indicator?
+ if parser.buffer[parser.buffer_pos] == ',' {
+ return yaml_parser_fetch_flow_entry(parser)
+ }
+
+ // Is it the block entry indicator?
+ if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
+ return yaml_parser_fetch_block_entry(parser)
+ }
+
+ // Is it the key indicator?
+ if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_key(parser)
+ }
+
+ // Is it the value indicator?
+ if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_value(parser)
+ }
+
+ // Is it an alias?
+ if parser.buffer[parser.buffer_pos] == '*' {
+ return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
+ }
+
+ // Is it an anchor?
+ if parser.buffer[parser.buffer_pos] == '&' {
+ return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
+ }
+
+ // Is it a tag?
+ if parser.buffer[parser.buffer_pos] == '!' {
+ return yaml_parser_fetch_tag(parser)
+ }
+
+ // Is it a literal scalar?
+ if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, true)
+ }
+
+ // Is it a folded scalar?
+ if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, false)
+ }
+
+ // Is it a single-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ return yaml_parser_fetch_flow_scalar(parser, true)
+ }
+
+ // Is it a double-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '"' {
+ return yaml_parser_fetch_flow_scalar(parser, false)
+ }
+
+ // Is it a plain scalar?
+ //
+ // A plain scalar may start with any non-blank characters except
+ //
+ // '-', '?', ':', ',', '[', ']', '{', '}',
+ // '#', '&', '*', '!', '|', '>', '\'', '\"',
+ // '%', '@', '`'.
+ //
+ // In the block context (and, for the '-' indicator, in the flow context
+ // too), it may also start with the characters
+ //
+ // '-', '?', ':'
+ //
+ // if it is followed by a non-space character.
+ //
+ // The last rule is more restrictive than the specification requires.
+ // [Go] TODO Make this logic more reasonable.
+ //switch parser.buffer[parser.buffer_pos] {
+ //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
+ //}
+ if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
+ parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
+ parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
+ (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level == 0 &&
+ (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
+ !is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_plain_scalar(parser)
+ }
+
+ // If we don't determine the token type so far, it is an error.
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning for the next token", parser.mark,
+ "found character that cannot start any token")
+}
+
+func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) {
+ if !simple_key.possible {
+ return false, true
+ }
+
+ // The 1.2 specification says:
+ //
+ // "If the ? indicator is omitted, parsing needs to see past the
+ // implicit key to recognize it as such. To limit the amount of
+	//     lookahead required, the ':' indicator must appear at most 1024
+ // Unicode characters beyond the start of the key. In addition, the key
+ // is restricted to a single line."
+ //
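+	// For example, in
+	//
+	//	? a key
+	//	  spanning two lines
+	//	: value
+	//
+	// the key must use the explicit '?' form: a plain key candidate that is
+	// still open when the scanner has moved on to a later line (or more than
+	// 1024 characters ahead) is discarded below, or reported as an error if
+	// the key was required.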
+ if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index {
+ // Check if the potential simple key to be removed is required.
+ if simple_key.required {
+ return false, yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", simple_key.mark,
+ "could not find expected ':'")
+ }
+ simple_key.possible = false
+ return false, true
+ }
+ return true, true
+}
+
+// Check if a simple key may start at the current position and add it if
+// needed.
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+ // A simple key is required at the current position if the scanner is in
+ // the block context and the current column coincides with the indentation
+ // level.
+
+ required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+ //
+ // If the current position may start a simple key, save it.
+ //
+ if parser.simple_key_allowed {
+ simple_key := yaml_simple_key_t{
+ possible: true,
+ required: required,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ mark: parser.mark,
+ }
+
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+ parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+ parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
+ }
+ return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+ i := len(parser.simple_keys) - 1
+ if parser.simple_keys[i].possible {
+ // If the key is required, it is an error.
+ if parser.simple_keys[i].required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", parser.simple_keys[i].mark,
+ "could not find expected ':'")
+ }
+ // Remove the key from the stack.
+ parser.simple_keys[i].possible = false
+ delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
+ }
+ return true
+}
+
+// max_flow_level limits the flow_level
+const max_flow_level = 10000
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+ // Reset the simple key on the next level.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
+ possible: false,
+ required: false,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ mark: parser.mark,
+ })
+
+ // Increase the flow level.
+ parser.flow_level++
+ if parser.flow_level > max_flow_level {
+ return yaml_parser_set_scanner_error(parser,
+ "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+ fmt.Sprintf("exceeded max depth of %d", max_flow_level))
+ }
+ return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+ if parser.flow_level > 0 {
+ parser.flow_level--
+ last := len(parser.simple_keys) - 1
+ delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
+ parser.simple_keys = parser.simple_keys[:last]
+ }
+ return true
+}
+
+// max_indents limits the indents stack size
+const max_indents = 10000
+
+// Push the current indentation level to the stack and set the new
+// indentation level if the current column is greater than the indentation
+// level. In this case, append or insert the specified token into the token
+// queue.
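+//
+// If number is -1 the token is appended at the tail of the queue; otherwise
+// number is the absolute token number recorded for a simple key, and the new
+// token is inserted just before the KEY token already queued for that key
+// (see yaml_parser_fetch_value).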
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ if parser.indent < column {
+ // Push the current indentation level to the stack and set the new
+ // indentation level.
+ parser.indents = append(parser.indents, parser.indent)
+ parser.indent = column
+ if len(parser.indents) > max_indents {
+ return yaml_parser_set_scanner_error(parser,
+ "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+ fmt.Sprintf("exceeded max depth of %d", max_indents))
+ }
+
+ // Create a token and insert it into the queue.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: mark,
+ end_mark: mark,
+ }
+ if number > -1 {
+ number -= parser.tokens_parsed
+ }
+ yaml_insert_token(parser, number, &token)
+ }
+ return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less or equal to the column. For each indentation level, append
+// the BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ block_mark := scan_mark
+ block_mark.index--
+
+ // Loop through the indentation levels in the stack.
+ for parser.indent > column {
+
+ // [Go] Reposition the end token before potential following
+ // foot comments of parent blocks. For that, search
+ // backwards for recent comments that were at the same
+ // indent as the block that is ending now.
+ stop_index := block_mark.index
+ for i := len(parser.comments) - 1; i >= 0; i-- {
+ comment := &parser.comments[i]
+
+ if comment.end_mark.index < stop_index {
+ // Don't go back beyond the start of the comment/whitespace scan, unless column < 0.
+ // If requested indent column is < 0, then the document is over and everything else
+ // is a foot anyway.
+ break
+ }
+ if comment.start_mark.column == parser.indent+1 {
+ // This is a good match. But maybe there's a former comment
+ // at that same indent level, so keep searching.
+ block_mark = comment.start_mark
+ }
+
+ // While the end of the former comment matches with
+ // the start of the following one, we know there's
+ // nothing in between and scanning is still safe.
+ stop_index = comment.scan_mark.index
+ }
+
+ // Create a token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_END_TOKEN,
+ start_mark: block_mark,
+ end_mark: block_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+
+ // Pop the indentation level.
+ parser.indent = parser.indents[len(parser.indents)-1]
+ parser.indents = parser.indents[:len(parser.indents)-1]
+ }
+ return true
+}
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+ // Set the initial indentation.
+ parser.indent = -1
+
+ // Initialize the simple key stack.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+ parser.simple_keys_by_tok = make(map[int]int)
+
+ // A simple key is allowed at the beginning of the stream.
+ parser.simple_key_allowed = true
+
+ // We have started.
+ parser.stream_start_produced = true
+
+ // Create the STREAM-START token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_START_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ encoding: parser.encoding,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+ // Force new line.
+ if parser.mark.column != 0 {
+ parser.mark.column = 0
+ parser.mark.line++
+ }
+
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the STREAM-END token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
+ token := yaml_token_t{}
+ if !yaml_parser_scan_directive(parser, &token) {
+ return false
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+ start_mark := parser.mark
+
+ skip(parser)
+ skip(parser)
+ skip(parser)
+
+ end_mark := parser.mark
+
+ // Create the DOCUMENT-START or DOCUMENT-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+
+ // The indicators '[' and '{' may start a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // Increase the flow level.
+ if !yaml_parser_increase_flow_level(parser) {
+ return false
+ }
+
+ // A simple key may follow the indicators '[' and '{'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset any potential simple key on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Decrease the flow level.
+ if !yaml_parser_decrease_flow_level(parser) {
+ return false
+ }
+
+ // No simple keys after the indicators ']' and '}'.
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after ','.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_FLOW_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+ // Check if the scanner is in the block context.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new entry.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "block sequence entries are not allowed in this context")
+ }
+ // Add the BLOCK-SEQUENCE-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+ return false
+ }
+ } else {
+ // It is an error for the '-' indicator to occur in the flow context,
+ // but we let the Parser detect and report about it because the Parser
+ // is able to point to the context.
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '-'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the BLOCK-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+ // In the block context, additional checks are required.
+ if parser.flow_level == 0 {
+		// Check if we are allowed to start a new key (not necessarily simple).
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping keys are not allowed in this context")
+ }
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '?' in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the KEY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+ simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+ // Have we found a simple key?
+ if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
+ return false
+
+ } else if valid {
+
+ // Create the KEY token and insert it into the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: simple_key.mark,
+ end_mark: simple_key.mark,
+ }
+ yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+ // In the block context, we may need to add the BLOCK-MAPPING-START token.
+ if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+ simple_key.token_number,
+ yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+ return false
+ }
+
+ // Remove the simple key.
+ simple_key.possible = false
+ delete(parser.simple_keys_by_tok, simple_key.token_number)
+
+ // A simple key cannot follow another simple key.
+ parser.simple_key_allowed = false
+
+ } else {
+ // The ':' indicator follows a complex key.
+
+ // In the block context, extra checks are required.
+ if parser.flow_level == 0 {
+
+ // Check if we are allowed to start a complex value.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping values are not allowed in this context")
+ }
+
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Simple keys after ':' are allowed in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+ }
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the VALUE token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_VALUE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the ALIAS or ANCHOR token.
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // An anchor or an alias could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow an anchor or an alias.
+ parser.simple_key_allowed = false
+
+ // Create the ALIAS or ANCHOR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_anchor(parser, &token, typ) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the TAG token.
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
+ // A tag could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a tag.
+ parser.simple_key_allowed = false
+
+ // Create the TAG token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_tag(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+ // Remove any potential simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // A simple key may follow a block scalar.
+ parser.simple_key_allowed = true
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+	// A quoted scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a flow scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+ // A plain scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+	// A simple key cannot follow a plain scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_plain_scalar(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+ scan_mark := parser.mark
+
+	// Loop until the next token is found.
+ for {
+ // Allow the BOM mark to start a line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ }
+
+ // Eat whitespaces.
+ // Tabs are allowed:
+ // - in the flow context
+ // - in the block context, but not at the beginning of the line or
+ // after '-', '?', or ':' (complex value).
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if we just had a line comment under a sequence entry that
+ // looks more like a header to the following content. Similar to this:
+ //
+ // - # The comment
+ // - Some data
+ //
+ // If so, transform the line comment to a head comment and reposition.
+ if len(parser.comments) > 0 && len(parser.tokens) > 1 {
+ tokenA := parser.tokens[len(parser.tokens)-2]
+ tokenB := parser.tokens[len(parser.tokens)-1]
+ comment := &parser.comments[len(parser.comments)-1]
+ if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) {
+ // If it was in the prior line, reposition so it becomes a
+ // header of the follow up token. Otherwise, keep it in place
+ // so it becomes a header of the former.
+ comment.head = comment.line
+ comment.line = nil
+ if comment.start_mark.line == parser.mark.line-1 {
+ comment.token_mark = parser.mark
+ }
+ }
+ }
+
+ // Eat a comment until a line break.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ if !yaml_parser_scan_comments(parser, scan_mark) {
+ return false
+ }
+ }
+
+ // If it is a line break, eat it.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+
+ // In the block context, a new line may start a simple key.
+ if parser.flow_level == 0 {
+ parser.simple_key_allowed = true
+ }
+ } else {
+ break // We have found a token.
+ }
+ }
+
+ return true
+}
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+//
+// %YAML 1.1 # a comment \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Eat '%'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the directive name.
+ var name []byte
+ if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+ return false
+ }
+
+ // Is it a YAML directive?
+ if bytes.Equal(name, []byte("YAML")) {
+ // Scan the VERSION directive value.
+ var major, minor int8
+ if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a VERSION-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_VERSION_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ major: major,
+ minor: minor,
+ }
+
+ // Is it a TAG directive?
+ } else if bytes.Equal(name, []byte("TAG")) {
+ // Scan the TAG directive value.
+ var handle, prefix []byte
+ if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a TAG-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ prefix: prefix,
+ }
+
+ // Unknown directive.
+ } else {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unknown directive name")
+ return false
+ }
+
+ // Eat the rest of the line including any comments.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ if parser.buffer[parser.buffer_pos] == '#' {
+ // [Go] Discard this inline comment for the time being.
+ //if !yaml_parser_scan_line_comment(parser, start_mark) {
+ // return false
+ //}
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ return true
+}
+
+// Scan the directive name.
+//
+// Scope:
+//
+// %YAML 1.1 # a comment \n
+// ^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+ // Consume the directive name.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ var s []byte
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the name is empty.
+ if len(s) == 0 {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "could not find expected directive name")
+ return false
+ }
+
+	// Check for a blank character after the name.
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unexpected non-alphabetical character")
+ return false
+ }
+ *name = s
+ return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+//
+// %YAML 1.1 # a comment \n
+// ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the major version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+ return false
+ }
+
+ // Eat '.'.
+ if parser.buffer[parser.buffer_pos] != '.' {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected digit or '.' character")
+ }
+
+ skip(parser)
+
+ // Consume the minor version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+ return false
+ }
+ return true
+}
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+//
+// %YAML 1.1 # a comment \n
+// ^
+// %YAML 1.1 # a comment \n
+// ^
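+//
+// For example, "%YAML 1.1" yields major == 1 and minor == 1, while a version
+// component with more than max_number_length digits (e.g. "%YAML 1.234") is
+// rejected as an extremely long version number.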
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+ // Repeat while the next character is digit.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var value, length int8
+ for is_digit(parser.buffer, parser.buffer_pos) {
+ // Check if the number is too long.
+ length++
+ if length > max_number_length {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "found extremely long version number")
+ }
+ value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the number was present.
+ if length == 0 {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected version number")
+ }
+ *number = value
+ return true
+}
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+//
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+ var handle_value, prefix_value []byte
+
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
+ return false
+ }
+
+ // Expect a whitespace.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace")
+ return false
+ }
+
+ // Eat whitespaces.
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a prefix.
+ if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
+ return false
+ }
+
+ // Expect a whitespace or line break.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ *handle = handle_value
+ *prefix = prefix_value
+ return true
+}
+
+func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
+ var s []byte
+
+ // Eat the indicator character.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the value.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ end_mark := parser.mark
+
+ /*
+ * Check if length of the anchor is greater than 0 and it is followed by
+ * a whitespace character or one of the indicators:
+ *
+ * '?', ':', ',', ']', '}', '%', '@', '`'.
+ */
+
+ if len(s) == 0 ||
+ !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
+ parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '`') {
+ context := "while scanning an alias"
+ if typ == yaml_ANCHOR_TOKEN {
+ context = "while scanning an anchor"
+ }
+ yaml_parser_set_scanner_error(parser, context, start_mark,
+ "did not find expected alphabetic or numeric character")
+ return false
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ }
+
+ return true
+}
+
+/*
+ * Scan a TAG token.
+ */
+
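+// As a rough guide, the three source forms of a tag scan as follows
+// (illustrative only, using the default handles):
+//
+//	!!str    -> handle "!!", suffix "str"
+//	!local   -> handle "!",  suffix "local"
+//	!<tag:x> -> handle "",   suffix "tag:x"
+//	!        -> handle "",   suffix "!"     (the special '!' tag)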
+func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
+ var handle, suffix []byte
+
+ start_mark := parser.mark
+
+ // Check if the tag is in the canonical form.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ if parser.buffer[parser.buffer_pos+1] == '<' {
+ // Keep the handle as ''
+
+ // Eat '!<'
+ skip(parser)
+ skip(parser)
+
+ // Consume the tag value.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+
+ // Check for '>' and eat it.
+ if parser.buffer[parser.buffer_pos] != '>' {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find the expected '>'")
+ return false
+ }
+
+ skip(parser)
+ } else {
+ // The tag has either the '!suffix' or the '!handle!suffix' form.
+
+ // First, try to scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+ return false
+ }
+
+		// Check if it is, indeed, a handle.
+ if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+ // Scan the suffix now.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+ } else {
+ // It wasn't a handle after all. Scan the rest of the tag.
+ if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+ return false
+ }
+
+ // Set the handle to '!'.
+ handle = []byte{'!'}
+
+ // A special case: the '!' tag. Set the handle to '' and the
+ // suffix to '!'.
+ if len(suffix) == 0 {
+ handle, suffix = suffix, handle
+ }
+ }
+ }
+
+ // Check the character which ends the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ suffix: suffix,
+ }
+ return true
+}
+
+// Scan a tag handle.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+ // Check the initial '!' character.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] != '!' {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+
+ var s []byte
+
+ // Copy the '!' character.
+ s = read(parser, s)
+
+ // Copy all subsequent alphabetical and numerical characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the trailing character is '!' and copy it.
+ if parser.buffer[parser.buffer_pos] == '!' {
+ s = read(parser, s)
+ } else {
+ // It's either the '!' tag or not really a tag handle. If it's a %TAG
+		// directive, it's an error. If it's a tag token, it must be part of the URI.
+ if directive && string(s) != "!" {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+ }
+
+ *handle = s
+ return true
+}
+
+// Scan a tag.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+ //size_t length = head ? strlen((char *)head) : 0
+ var s []byte
+ hasTag := len(head) > 0
+
+ // Copy the head if needed.
+ //
+ // Note that we don't copy the leading '!' character.
+ if len(head) > 1 {
+ s = append(s, head[1:]...)
+ }
+
+ // Scan the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // The set of characters that may appear in URI is as follows:
+ //
+ // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+ // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+ // '%'.
+ // [Go] TODO Convert this into more reasonable logic.
+ for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+ parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+ parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+ parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+ parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+ parser.buffer[parser.buffer_pos] == '%' {
+ // Check if it is a URI-escape sequence.
+ if parser.buffer[parser.buffer_pos] == '%' {
+ if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+ return false
+ }
+ } else {
+ s = read(parser, s)
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ hasTag = true
+ }
+
+ if !hasTag {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected tag URI")
+ return false
+ }
+ *uri = s
+ return true
+}
+
+// Decode a URI-escape sequence corresponding to a single UTF-8 character.
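+//
+// For example, the escape sequence "%C3%A9" contributes the two UTF-8 bytes
+// 0xC3 0xA9 (U+00E9) to the result, and "%20" contributes a single space byte.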
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+ // Decode the required number of characters.
+ w := 1024
+ for w > 0 {
+ // Check for a URI-escaped octet.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+
+ if !(parser.buffer[parser.buffer_pos] == '%' &&
+ is_hex(parser.buffer, parser.buffer_pos+1) &&
+ is_hex(parser.buffer, parser.buffer_pos+2)) {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find URI escaped octet")
+ }
+
+ // Get the octet.
+ octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+ // If it is the leading octet, determine the length of the UTF-8 sequence.
+ if w == 1024 {
+ w = width(octet)
+ if w == 0 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect leading UTF-8 octet")
+ }
+ } else {
+ // Check if the trailing octet is correct.
+ if octet&0xC0 != 0x80 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect trailing UTF-8 octet")
+ }
+ }
+
+ // Copy the octet and move the pointers.
+ *s = append(*s, octet)
+ skip(parser)
+ skip(parser)
+ skip(parser)
+ w--
+ }
+ return true
+}
+
+// Scan a block scalar.
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+ // Eat the indicator '|' or '>'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the additional block scalar indicators.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check for a chomping indicator.
+ var chomping, increment int
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ // Set the chomping method and eat the indicator.
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+
+ // Check for an indentation indicator.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_digit(parser.buffer, parser.buffer_pos) {
+ // Check that the indentation is greater than 0.
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+
+ // Get the indentation level and eat the indicator.
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+ }
+
+ } else if is_digit(parser.buffer, parser.buffer_pos) {
+ // Do the same as above, but in the opposite order.
+
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+ }
+ }
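+
+	// At this point chomping is -1 (strip, '-'), 0 (clip, the default) or +1
+	// (keep, '+'), and increment holds the explicit indentation indicator, if
+	// any. For example, the header "|-" strips trailing line breaks, while
+	// ">2" folds the scalar and fixes its content indentation at two columns
+	// beyond the current indentation level.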
+
+ // Eat whitespaces and comments to the end of the line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.buffer[parser.buffer_pos] == '#' {
+ if !yaml_parser_scan_line_comment(parser, start_mark) {
+ return false
+ }
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ end_mark := parser.mark
+
+ // Set the indentation level if it was specified.
+ var indent int
+ if increment > 0 {
+ if parser.indent >= 0 {
+ indent = parser.indent + increment
+ } else {
+ indent = increment
+ }
+ }
+
+ // Scan the leading line breaks and determine the indentation level if needed.
+ var s, leading_break, trailing_breaks []byte
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+
+ // Scan the block scalar content.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var leading_blank, trailing_blank bool
+ for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
+ // We are at the beginning of a non-empty line.
+
+ // Is it a trailing whitespace?
+ trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Check if we need to fold the leading line break.
+ if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
+ // Do we need to join the lines by space?
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ }
+ } else {
+ s = append(s, leading_break...)
+ }
+ leading_break = leading_break[:0]
+
+ // Append the remaining line breaks.
+ s = append(s, trailing_breaks...)
+ trailing_breaks = trailing_breaks[:0]
+
+ // Is it a leading whitespace?
+ leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Consume the current line.
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ leading_break = read_line(parser, leading_break)
+
+ // Eat the following indentation spaces and line breaks.
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+ }
+
+ // Chomp the tail.
+ if chomping != -1 {
+ s = append(s, leading_break...)
+ }
+ if chomping == 1 {
+ s = append(s, trailing_breaks...)
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_LITERAL_SCALAR_STYLE,
+ }
+ if !literal {
+ token.style = yaml_FOLDED_SCALAR_STYLE
+ }
+ return true
+}
+
+// Scan indentation spaces and line breaks for a block scalar. Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+ *end_mark = parser.mark
+
+ // Eat the indentation spaces and line breaks.
+ max_indent := 0
+ for {
+ // Eat the indentation spaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.mark.column > max_indent {
+ max_indent = parser.mark.column
+ }
+
+ // Check for a tab character messing the indentation.
+ if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found a tab character where an indentation space is expected")
+ }
+
+ // Have we found a non-empty line?
+ if !is_break(parser.buffer, parser.buffer_pos) {
+ break
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ // [Go] Should really be returning breaks instead.
+ *breaks = read_line(parser, *breaks)
+ *end_mark = parser.mark
+ }
+
+ // Determine the indentation level if needed.
+ if *indent == 0 {
+ *indent = max_indent
+ if *indent < parser.indent+1 {
+ *indent = parser.indent + 1
+ }
+ if *indent < 1 {
+ *indent = 1
+ }
+ }
+ return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+ // Eat the left quote.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the content of the quoted scalar.
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ for {
+ // Check that there are no document indicators at the beginning of the line.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected document indicator")
+ return false
+ }
+
+ // Check for EOF.
+ if is_z(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected end of stream")
+ return false
+ }
+
+ // Consume non-blank characters.
+ leading_blanks := false
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+ if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+ // It is an escaped single quote.
+ s = append(s, '\'')
+ skip(parser)
+ skip(parser)
+
+ } else if single && parser.buffer[parser.buffer_pos] == '\'' {
+ // It is a right single quote.
+ break
+ } else if !single && parser.buffer[parser.buffer_pos] == '"' {
+ // It is a right double quote.
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+ // It is an escaped line break.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+ skip(parser)
+ skip_line(parser)
+ leading_blanks = true
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+ // It is an escape sequence.
+ code_length := 0
+
+ // Check the escape character.
+ switch parser.buffer[parser.buffer_pos+1] {
+ case '0':
+ s = append(s, 0)
+ case 'a':
+ s = append(s, '\x07')
+ case 'b':
+ s = append(s, '\x08')
+ case 't', '\t':
+ s = append(s, '\x09')
+ case 'n':
+ s = append(s, '\x0A')
+ case 'v':
+ s = append(s, '\x0B')
+ case 'f':
+ s = append(s, '\x0C')
+ case 'r':
+ s = append(s, '\x0D')
+ case 'e':
+ s = append(s, '\x1B')
+ case ' ':
+ s = append(s, '\x20')
+ case '"':
+ s = append(s, '"')
+ case '\'':
+ s = append(s, '\'')
+ case '\\':
+ s = append(s, '\\')
+ case 'N': // NEL (#x85)
+ s = append(s, '\xC2')
+ s = append(s, '\x85')
+ case '_': // #xA0
+ s = append(s, '\xC2')
+ s = append(s, '\xA0')
+ case 'L': // LS (#x2028)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA8')
+ case 'P': // PS (#x2029)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA9')
+ case 'x':
+ code_length = 2
+ case 'u':
+ code_length = 4
+ case 'U':
+ code_length = 8
+ default:
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found unknown escape character")
+ return false
+ }
+
+ skip(parser)
+ skip(parser)
+
+ // Consume an arbitrary escape code.
+ if code_length > 0 {
+ var value int
+
+ // Scan the character value.
+ if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+ return false
+ }
+ for k := 0; k < code_length; k++ {
+ if !is_hex(parser.buffer, parser.buffer_pos+k) {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "did not find expected hexdecimal number")
+ return false
+ }
+ value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+ }
+
+ // Check the value and write the character.
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found invalid Unicode character escape code")
+ return false
+ }
+ if value <= 0x7F {
+ s = append(s, byte(value))
+ } else if value <= 0x7FF {
+ s = append(s, byte(0xC0+(value>>6)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else if value <= 0xFFFF {
+ s = append(s, byte(0xE0+(value>>12)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else {
+ s = append(s, byte(0xF0+(value>>18)))
+ s = append(s, byte(0x80+((value>>12)&0x3F)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ }
+
+ // Advance the pointer.
+ for k := 0; k < code_length; k++ {
+ skip(parser)
+ }
+ }
+ } else {
+ // It is a non-escaped non-blank character.
+ s = read(parser, s)
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we are at the end of the scalar.
+ if single {
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ break
+ }
+ } else {
+ if parser.buffer[parser.buffer_pos] == '"' {
+ break
+ }
+ }
+
+ // Consume blank characters.
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Join the whitespaces or fold line breaks.
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if len(leading_break) > 0 && leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Eat the right quote.
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_SINGLE_QUOTED_SCALAR_STYLE,
+ }
+ if !single {
+ token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ return true
+}
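
A small illustrative sketch of the escape handling above as seen from the public API: the \x, \u and \U escapes are decoded and re-encoded as UTF-8, and single-quoted scalars only support the doubled-quote escape. The import path is the vendored go.yaml.in/yaml/v3.

package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	// Double-quoted scalars support the escape sequences handled above.
	var d string
	if err := yaml.Unmarshal([]byte(`"caf\xE9 \u00e9\ttab\nnewline"`), &d); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", d) // "café é\ttab\nnewline"

	// In single-quoted scalars the only escape is '' for a literal quote.
	var s string
	if err := yaml.Unmarshal([]byte(`'it''s'`), &s); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", s) // "it's"
}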
+
+// Scan a plain scalar.
+func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
+
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ var leading_blanks bool
+ var indent = parser.indent + 1
+
+ start_mark := parser.mark
+ end_mark := parser.mark
+
+ // Consume the content of the plain scalar.
+ for {
+ // Check for a document indicator.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ break
+ }
+
+ // Check for a comment.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ break
+ }
+
+ // Consume non-blank characters.
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+
+ // Check for indicators that may end a plain scalar.
+ if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level > 0 &&
+ (parser.buffer[parser.buffer_pos] == ',' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}')) {
+ break
+ }
+
+ // Check if we need to join whitespaces and breaks.
+ if leading_blanks || len(whitespaces) > 0 {
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ leading_blanks = false
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Copy the character.
+ s = read(parser, s)
+
+ end_mark = parser.mark
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ // Is it the end?
+ if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+ break
+ }
+
+ // Consume blank characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+
+ // Check for tab characters that abuse indentation.
+ if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+ start_mark, "found a tab character that violates indentation")
+ return false
+ }
+
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check indentation level.
+ if parser.flow_level == 0 && parser.mark.column < indent {
+ break
+ }
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_PLAIN_SCALAR_STYLE,
+ }
+
+ // Note that we change the 'simple_key_allowed' flag.
+ if leading_blanks {
+ parser.simple_key_allowed = true
+ }
+ return true
+}
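
The indicator checks above determine where a plain scalar ends. A brief sketch of the observable behavior, again using the vendored go.yaml.in/yaml/v3 package (input values are illustrative):

package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	var m map[string]string
	src := []byte("msg: plain text with spaces   # trailing comment\nurl: http://example.com#frag\n")
	if err := yaml.Unmarshal(src, &m); err != nil {
		panic(err)
	}
	fmt.Println(m["msg"]) // "plain text with spaces" – a '#' after whitespace ends the scalar
	fmt.Println(m["url"]) // "http://example.com#frag" – mid-word '#' and ':' stay part of the value
}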
+
+func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool {
+ if parser.newlines > 0 {
+ return true
+ }
+
+ var start_mark yaml_mark_t
+ var text []byte
+
+ for peek := 0; peek < 512; peek++ {
+ if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) {
+ break
+ }
+ if is_blank(parser.buffer, parser.buffer_pos+peek) {
+ continue
+ }
+ if parser.buffer[parser.buffer_pos+peek] == '#' {
+ seen := parser.mark.index + peek
+ for {
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_breakz(parser.buffer, parser.buffer_pos) {
+ if parser.mark.index >= seen {
+ break
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ } else if parser.mark.index >= seen {
+ if len(text) == 0 {
+ start_mark = parser.mark
+ }
+ text = read(parser, text)
+ } else {
+ skip(parser)
+ }
+ }
+ }
+ break
+ }
+ if len(text) > 0 {
+ parser.comments = append(parser.comments, yaml_comment_t{
+ token_mark: token_mark,
+ start_mark: start_mark,
+ line: text,
+ })
+ }
+ return true
+}
+
+func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool {
+ token := parser.tokens[len(parser.tokens)-1]
+
+ if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 {
+ token = parser.tokens[len(parser.tokens)-2]
+ }
+
+ var token_mark = token.start_mark
+ var start_mark yaml_mark_t
+ var next_indent = parser.indent
+ if next_indent < 0 {
+ next_indent = 0
+ }
+
+ var recent_empty = false
+ var first_empty = parser.newlines <= 1
+
+ var line = parser.mark.line
+ var column = parser.mark.column
+
+ var text []byte
+
+ // The foot line is the place where a comment must start to
+ // still be considered as a foot of the prior content.
+ // If there's some content in the currently parsed line, then
+ // the foot is the line below it.
+ var foot_line = -1
+ if scan_mark.line > 0 {
+ foot_line = parser.mark.line - parser.newlines + 1
+ if parser.newlines == 0 && parser.mark.column > 1 {
+ foot_line++
+ }
+ }
+
+ var peek = 0
+ for ; peek < 512; peek++ {
+ if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) {
+ break
+ }
+ column++
+ if is_blank(parser.buffer, parser.buffer_pos+peek) {
+ continue
+ }
+ c := parser.buffer[parser.buffer_pos+peek]
+ var close_flow = parser.flow_level > 0 && (c == ']' || c == '}')
+ if close_flow || is_breakz(parser.buffer, parser.buffer_pos+peek) {
+ // Got line break or terminator.
+ if close_flow || !recent_empty {
+ if close_flow || first_empty && (start_mark.line == foot_line && token.typ != yaml_VALUE_TOKEN || start_mark.column-1 < next_indent) {
+ // This is the first empty line and there were no empty lines before,
+ // so this initial part of the comment is a foot of the prior token
+ // instead of being a head for the following one. Split it up.
+ // Alternatively, this might also be the last comment inside a flow
+ // scope, so it must be a footer.
+ if len(text) > 0 {
+ if start_mark.column-1 < next_indent {
+ // If dedented it's unrelated to the prior token.
+ token_mark = start_mark
+ }
+ parser.comments = append(parser.comments, yaml_comment_t{
+ scan_mark: scan_mark,
+ token_mark: token_mark,
+ start_mark: start_mark,
+ end_mark: yaml_mark_t{parser.mark.index + peek, line, column},
+ foot: text,
+ })
+ scan_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+ token_mark = scan_mark
+ text = nil
+ }
+ } else {
+ if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 {
+ text = append(text, '\n')
+ }
+ }
+ }
+ if !is_break(parser.buffer, parser.buffer_pos+peek) {
+ break
+ }
+ first_empty = false
+ recent_empty = true
+ column = 0
+ line++
+ continue
+ }
+
+ if len(text) > 0 && (close_flow || column-1 < next_indent && column != start_mark.column) {
+ // The comment at the different indentation is a foot of the
+ // preceding data rather than a head of the upcoming one.
+ parser.comments = append(parser.comments, yaml_comment_t{
+ scan_mark: scan_mark,
+ token_mark: token_mark,
+ start_mark: start_mark,
+ end_mark: yaml_mark_t{parser.mark.index + peek, line, column},
+ foot: text,
+ })
+ scan_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+ token_mark = scan_mark
+ text = nil
+ }
+
+ if parser.buffer[parser.buffer_pos+peek] != '#' {
+ break
+ }
+
+ if len(text) == 0 {
+ start_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+ } else {
+ text = append(text, '\n')
+ }
+
+ recent_empty = false
+
+ // Consume until after the consumed comment line.
+ seen := parser.mark.index + peek
+ for {
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_breakz(parser.buffer, parser.buffer_pos) {
+ if parser.mark.index >= seen {
+ break
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ } else if parser.mark.index >= seen {
+ text = read(parser, text)
+ } else {
+ skip(parser)
+ }
+ }
+
+ peek = 0
+ column = 0
+ line = parser.mark.line
+ next_indent = parser.indent
+ if next_indent < 0 {
+ next_indent = 0
+ }
+ }
+
+ if len(text) > 0 {
+ parser.comments = append(parser.comments, yaml_comment_t{
+ scan_mark: scan_mark,
+ token_mark: start_mark,
+ start_mark: start_mark,
+ end_mark: yaml_mark_t{parser.mark.index + peek - 1, line, column},
+ head: text,
+ })
+ }
+ return true
+}
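
The comments collected by the two scanners above end up on the HeadComment, LineComment, and FootComment fields of yaml.Node, so they survive a decode/encode round trip. A minimal sketch with an illustrative input, assuming the vendored go.yaml.in/yaml/v3 import path:

package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	src := []byte("# configuration for the demo\nkey: value # inline note\n\n# trailing remark\n")

	// Decoding into a Node keeps the scanned comments attached to nearby
	// nodes, so they reappear when the node tree is re-encoded.
	var doc yaml.Node
	if err := yaml.Unmarshal(src, &doc); err != nil {
		panic(err)
	}
	out, err := yaml.Marshal(&doc)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // comments are present in the re-encoded document
}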
diff --git a/vendor/go.yaml.in/yaml/v3/sorter.go b/vendor/go.yaml.in/yaml/v3/sorter.go
new file mode 100644
index 000000000..9210ece7e
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/sorter.go
@@ -0,0 +1,134 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "reflect"
+ "unicode"
+)
+
+type keyList []reflect.Value
+
+func (l keyList) Len() int { return len(l) }
+func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l keyList) Less(i, j int) bool {
+ a := l[i]
+ b := l[j]
+ ak := a.Kind()
+ bk := b.Kind()
+ for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
+ a = a.Elem()
+ ak = a.Kind()
+ }
+ for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
+ b = b.Elem()
+ bk = b.Kind()
+ }
+ af, aok := keyFloat(a)
+ bf, bok := keyFloat(b)
+ if aok && bok {
+ if af != bf {
+ return af < bf
+ }
+ if ak != bk {
+ return ak < bk
+ }
+ return numLess(a, b)
+ }
+ if ak != reflect.String || bk != reflect.String {
+ return ak < bk
+ }
+ ar, br := []rune(a.String()), []rune(b.String())
+ digits := false
+ for i := 0; i < len(ar) && i < len(br); i++ {
+ if ar[i] == br[i] {
+ digits = unicode.IsDigit(ar[i])
+ continue
+ }
+ al := unicode.IsLetter(ar[i])
+ bl := unicode.IsLetter(br[i])
+ if al && bl {
+ return ar[i] < br[i]
+ }
+ if al || bl {
+ if digits {
+ return al
+ } else {
+ return bl
+ }
+ }
+ var ai, bi int
+ var an, bn int64
+ if ar[i] == '0' || br[i] == '0' {
+ for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
+ if ar[j] != '0' {
+ an = 1
+ bn = 1
+ break
+ }
+ }
+ }
+ for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
+ an = an*10 + int64(ar[ai]-'0')
+ }
+ for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
+ bn = bn*10 + int64(br[bi]-'0')
+ }
+ if an != bn {
+ return an < bn
+ }
+ if ai != bi {
+ return ai < bi
+ }
+ return ar[i] < br[i]
+ }
+ return len(ar) < len(br)
+}
+
+// keyFloat returns a float value for v if it is a number or bool,
+// and reports whether it is one of those kinds.
+func keyFloat(v reflect.Value) (f float64, ok bool) {
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return float64(v.Int()), true
+ case reflect.Float32, reflect.Float64:
+ return v.Float(), true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return float64(v.Uint()), true
+ case reflect.Bool:
+ if v.Bool() {
+ return 1, true
+ }
+ return 0, true
+ }
+ return 0, false
+}
+
+// numLess returns whether a < b.
+// a and b must necessarily have the same kind.
+func numLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return a.Int() < b.Int()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ }
+ panic("not a number")
+}
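
keyList.Less above is what gives Marshal its digit-aware ordering of map keys. A short sketch of the effect, assuming the vendored go.yaml.in/yaml/v3 import path:

package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	out, err := yaml.Marshal(map[string]int{"item10": 3, "item2": 2, "item1": 1})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// item1: 1
	// item2: 2
	// item10: 3   – runs of digits compare numerically, so item2 sorts before item10
}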
diff --git a/vendor/go.yaml.in/yaml/v3/writerc.go b/vendor/go.yaml.in/yaml/v3/writerc.go
new file mode 100644
index 000000000..266d0b092
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/writerc.go
@@ -0,0 +1,48 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+// Set the writer error and return false.
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_WRITER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Flush the output buffer.
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+ if emitter.write_handler == nil {
+ panic("write handler not set")
+ }
+
+ // Check if the buffer is empty.
+ if emitter.buffer_pos == 0 {
+ return true
+ }
+
+ if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+ return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+ }
+ emitter.buffer_pos = 0
+ return true
+}
diff --git a/vendor/go.yaml.in/yaml/v3/yaml.go b/vendor/go.yaml.in/yaml/v3/yaml.go
new file mode 100644
index 000000000..0b101cd20
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/yaml.go
@@ -0,0 +1,703 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+// https://github.com/yaml/go-yaml
+package yaml
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "sync"
+ "unicode/utf8"
+)
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document.
+type Unmarshaler interface {
+ UnmarshalYAML(value *Node) error
+}
+
+type obsoleteUnmarshaler interface {
+ UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+ MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// var t T
+// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+func Unmarshal(in []byte, out interface{}) (err error) {
+ return unmarshal(in, out, false)
+}
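
A hedged usage sketch of the partial-decode behavior described above; the Config type is hypothetical and only the vendored go.yaml.in/yaml/v3 API is used:

package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v3"
)

type Config struct {
	Port int    `yaml:"port"`
	Host string `yaml:"host"`
}

func main() {
	var c Config
	err := yaml.Unmarshal([]byte("port: not-a-number\nhost: localhost\n"), &c)
	if terr, ok := err.(*yaml.TypeError); ok {
		fmt.Println(terr.Errors) // one entry describing the port mismatch
	}
	fmt.Println(c.Host) // "localhost" – decoding continued past the bad field
}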
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+ parser *parser
+ knownFields bool
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{
+ parser: newParserFromReader(r),
+ }
+}
+
+// KnownFields ensures that the keys in decoded mappings
+// exist as fields in the struct being decoded into.
+func (dec *Decoder) KnownFields(enable bool) {
+ dec.knownFields = enable
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (dec *Decoder) Decode(v interface{}) (err error) {
+ d := newDecoder()
+ d.knownFields = dec.knownFields
+ defer handleErr(&err)
+ node := dec.parser.parse()
+ if node == nil {
+ return io.EOF
+ }
+ out := reflect.ValueOf(v)
+ if out.Kind() == reflect.Ptr && !out.IsNil() {
+ out = out.Elem()
+ }
+ d.unmarshal(node, out)
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
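
A short sketch of streaming decodes with KnownFields enabled; the documents are made up, and the io.EOF convention is the one documented above:

package main

import (
	"fmt"
	"io"
	"strings"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	stream := "name: first\n---\nname: second\n"
	dec := yaml.NewDecoder(strings.NewReader(stream))
	dec.KnownFields(true) // unknown mapping keys become decode errors

	for {
		var doc struct {
			Name string `yaml:"name"`
		}
		err := dec.Decode(&doc)
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(doc.Name) // "first", then "second"
	}
}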
+
+// Decode decodes the node and stores its data into the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (n *Node) Decode(v interface{}) (err error) {
+ d := newDecoder()
+ defer handleErr(&err)
+ out := reflect.ValueOf(v)
+ if out.Kind() == reflect.Ptr && !out.IsNil() {
+ out = out.Elem()
+ }
+ d.unmarshal(n, out)
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+ defer handleErr(&err)
+ d := newDecoder()
+ p := newParser(in)
+ defer p.destroy()
+ node := p.parse()
+ if node != nil {
+ v := reflect.ValueOf(out)
+ if v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ d.unmarshal(node, v)
+ }
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+// omitempty Only include the field if it's not set to the zero
+// value for the type or to empty slices or maps.
+// Zero valued structs will be omitted if all their public
+// fields are zero, unless they implement an IsZero
+// method (see the IsZeroer interface type), in which
+// case the field will be excluded if IsZero returns true.
+//
+// flow Marshal using a flow style (useful for structs,
+// sequences and maps).
+//
+// inline Inline the field, which must be a struct or a map,
+// causing all of its fields or keys to be processed as if
+// they were part of the outer struct. For maps, keys must
+// not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+func Marshal(in interface{}) (out []byte, err error) {
+ defer handleErr(&err)
+ e := newEncoder()
+ defer e.destroy()
+ e.marshalDoc("", reflect.ValueOf(in))
+ e.finish()
+ out = e.out
+ return
+}
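
A sketch of the tag flags described above (omitempty, flow, inline) on hypothetical Person/Address types, assuming the vendored go.yaml.in/yaml/v3 import path:

package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v3"
)

type Address struct {
	City string `yaml:"city"`
}

type Person struct {
	Name    string   `yaml:"name"`
	Nick    string   `yaml:"nick,omitempty"` // dropped when empty
	Tags    []string `yaml:"tags,flow"`      // emitted as a flow sequence
	Address `yaml:",inline"`                 // fields hoisted into the outer mapping
}

func main() {
	out, err := yaml.Marshal(Person{
		Name:    "ann",
		Tags:    []string{"x", "y"},
		Address: Address{City: "Oslo"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// name: ann
	// tags: [x, y]
	// city: Oslo
}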
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+ encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{
+ encoder: newEncoderWithWriter(w),
+ }
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent documents will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+ defer handleErr(&err)
+ e.encoder.marshalDoc("", reflect.ValueOf(v))
+ return nil
+}
+
+// Encode encodes value v and stores its representation in n.
+//
+// See the documentation for Marshal for details about the
+// conversion of Go values into YAML.
+func (n *Node) Encode(v interface{}) (err error) {
+ defer handleErr(&err)
+ e := newEncoder()
+ defer e.destroy()
+ e.marshalDoc("", reflect.ValueOf(v))
+ e.finish()
+ p := newParser(e.out)
+ p.textless = true
+ defer p.destroy()
+ doc := p.parse()
+ *n = *doc.Content[0]
+ return nil
+}
+
+// SetIndent changes the indentation used when encoding.
+func (e *Encoder) SetIndent(spaces int) {
+ if spaces < 0 {
+ panic("yaml: cannot indent to a negative number of spaces")
+ }
+ e.encoder.indent = spaces
+}
+
+// CompactSeqIndent makes it so that '- ' is considered part of the indentation.
+func (e *Encoder) CompactSeqIndent() {
+ e.encoder.emitter.compact_sequence_indent = true
+}
+
+// DefaultSeqIndent makes it so that '- ' is not considered part of the indentation.
+func (e *Encoder) DefaultSeqIndent() {
+ e.encoder.emitter.compact_sequence_indent = false
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+ defer handleErr(&err)
+ e.encoder.finish()
+ return nil
+}
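
A minimal sketch of the Encoder workflow described above: multiple Encode calls on one stream, a custom indent, and a final Close. Types and values are illustrative; errors from the Encode calls are elided for brevity.

package main

import (
	"bytes"
	"fmt"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	var buf bytes.Buffer
	enc := yaml.NewEncoder(&buf)
	enc.SetIndent(2) // use two-space indentation

	_ = enc.Encode(map[string][]int{"nums": {1, 2}})
	_ = enc.Encode(map[string]string{"next": "doc"})
	if err := enc.Close(); err != nil {
		panic(err)
	}

	// The second document is preceded by a "---" separator; no "..." is written.
	fmt.Print(buf.String())
}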
+
+func handleErr(err *error) {
+ if v := recover(); v != nil {
+ if e, ok := v.(yamlError); ok {
+ *err = e.err
+ } else {
+ panic(v)
+ }
+ }
+}
+
+type yamlError struct {
+ err error
+}
+
+func fail(err error) {
+ panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+ panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+ Errors []string
+}
+
+func (e *TypeError) Error() string {
+ return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
+}
+
+type Kind uint32
+
+const (
+ DocumentNode Kind = 1 << iota
+ SequenceNode
+ MappingNode
+ ScalarNode
+ AliasNode
+)
+
+type Style uint32
+
+const (
+ TaggedStyle Style = 1 << iota
+ DoubleQuotedStyle
+ SingleQuotedStyle
+ LiteralStyle
+ FoldedStyle
+ FlowStyle
+)
+
+// Node represents an element in the YAML document hierarchy. While documents
+// are typically encoded and decoded into higher level types, such as structs
+// and maps, Node is an intermediate representation that allows detailed
+// control over the content being decoded or encoded.
+//
+// It's worth noting that although Node offers access into details such as
+// line numbers, columns, and comments, the content when re-encoded will not
+// have its original textual representation preserved. An effort is made to
+// render the data pleasantly, and to preserve comments near the data they
+// describe, though.
+//
+// Values that make use of the Node type interact with the yaml package in the
+// same way any other type would do, by encoding and decoding yaml data
+// directly or indirectly into them.
+//
+// For example:
+//
+// var person struct {
+// Name string
+// Address yaml.Node
+// }
+// err := yaml.Unmarshal(data, &person)
+//
+// Or by itself:
+//
+// var person Node
+// err := yaml.Unmarshal(data, &person)
+type Node struct {
+ // Kind defines whether the node is a document, a mapping, a sequence,
+ // a scalar value, or an alias to another node. The specific data type of
+ // scalar nodes may be obtained via the ShortTag and LongTag methods.
+ Kind Kind
+
+ // Style allows customizing the appearance of the node in the tree.
+ Style Style
+
+ // Tag holds the YAML tag defining the data type for the value.
+ // When decoding, this field will always be set to the resolved tag,
+ // even when it wasn't explicitly provided in the YAML content.
+ // When encoding, if this field is unset the value type will be
+ // implied from the node properties, and if it is set, it will only
+ // be serialized into the representation if TaggedStyle is used or
+ // the implicit tag diverges from the provided one.
+ Tag string
+
+ // Value holds the unescaped and unquoted representation of the value.
+ Value string
+
+ // Anchor holds the anchor name for this node, which allows aliases to point to it.
+ Anchor string
+
+ // Alias holds the node that this alias points to. Only valid when Kind is AliasNode.
+ Alias *Node
+
+ // Content holds contained nodes for documents, mappings, and sequences.
+ Content []*Node
+
+ // HeadComment holds any comments in the lines preceding the node and
+ // not separated by an empty line.
+ HeadComment string
+
+ // LineComment holds any comments at the end of the line where the node is in.
+ LineComment string
+
+ // FootComment holds any comments following the node and before empty lines.
+ FootComment string
+
+ // Line and Column hold the node position in the decoded YAML text.
+ // These fields are not respected when encoding the node.
+ Line int
+ Column int
+}
+
+// IsZero returns whether the node has all of its fields unset.
+func (n *Node) IsZero() bool {
+ return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil &&
+ n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0
+}
+
+// LongTag returns the long form of the tag that indicates the data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) LongTag() string {
+ return longTag(n.ShortTag())
+}
+
+// ShortTag returns the short form of the YAML tag that indicates the data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) ShortTag() string {
+ if n.indicatedString() {
+ return strTag
+ }
+ if n.Tag == "" || n.Tag == "!" {
+ switch n.Kind {
+ case MappingNode:
+ return mapTag
+ case SequenceNode:
+ return seqTag
+ case AliasNode:
+ if n.Alias != nil {
+ return n.Alias.ShortTag()
+ }
+ case ScalarNode:
+ tag, _ := resolve("", n.Value)
+ return tag
+ case 0:
+ // Special case to make the zero value convenient.
+ if n.IsZero() {
+ return nullTag
+ }
+ }
+ return ""
+ }
+ return shortTag(n.Tag)
+}
+
+func (n *Node) indicatedString() bool {
+ return n.Kind == ScalarNode &&
+ (shortTag(n.Tag) == strTag ||
+ (n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0)
+}
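
A small sketch showing the resolved tags that ShortTag and LongTag report after decoding into a Node; the input is illustrative and the import path is the vendored go.yaml.in/yaml/v3.

package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	var doc yaml.Node
	if err := yaml.Unmarshal([]byte("port: 8080\nname: demo\n"), &doc); err != nil {
		panic(err)
	}
	mapping := doc.Content[0] // the document node wraps a single mapping node
	for i := 0; i < len(mapping.Content); i += 2 {
		key, val := mapping.Content[i], mapping.Content[i+1]
		fmt.Println(key.Value, val.ShortTag(), val.LongTag())
	}
	// port !!int tag:yaml.org,2002:int
	// name !!str tag:yaml.org,2002:str
}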
+
+// SetString is a convenience function that sets the node to a string value
+// and defines its style in a pleasant way depending on its content.
+func (n *Node) SetString(s string) {
+ n.Kind = ScalarNode
+ if utf8.ValidString(s) {
+ n.Value = s
+ n.Tag = strTag
+ } else {
+ n.Value = encodeBase64(s)
+ n.Tag = binaryTag
+ }
+ if strings.Contains(n.Value, "\n") {
+ n.Style = LiteralStyle
+ }
+}
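
And a sketch of SetString choosing LiteralStyle for multi-line content (the value is illustrative):

package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	var n yaml.Node
	n.SetString("first line\nsecond line")

	out, err := yaml.Marshal(&n)
	if err != nil {
		panic(err)
	}
	// Multi-line content gets LiteralStyle, so it is emitted as a
	// "|-" block scalar followed by the two indented lines.
	fmt.Print(string(out))
}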
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+ FieldsMap map[string]fieldInfo
+ FieldsList []fieldInfo
+
+ // InlineMap is the number of the field in the struct that
+ // contains an ,inline map, or -1 if there's none.
+ InlineMap int
+
+ // InlineUnmarshalers holds indexes to inlined fields that
+ // contain unmarshaler values.
+ InlineUnmarshalers [][]int
+}
+
+type fieldInfo struct {
+ Key string
+ Num int
+ OmitEmpty bool
+ Flow bool
+ // Id holds the unique field identifier, so we can cheaply
+ // check for field duplicates without maintaining an extra map.
+ Id int
+
+ // Inline holds the field index if the field is part of an inlined struct.
+ Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+var unmarshalerType reflect.Type
+
+func init() {
+ var v Unmarshaler
+ unmarshalerType = reflect.ValueOf(&v).Elem().Type()
+}
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+ fieldMapMutex.RLock()
+ sinfo, found := structMap[st]
+ fieldMapMutex.RUnlock()
+ if found {
+ return sinfo, nil
+ }
+
+ n := st.NumField()
+ fieldsMap := make(map[string]fieldInfo)
+ fieldsList := make([]fieldInfo, 0, n)
+ inlineMap := -1
+ inlineUnmarshalers := [][]int(nil)
+ for i := 0; i != n; i++ {
+ field := st.Field(i)
+ if field.PkgPath != "" && !field.Anonymous {
+ continue // Private field
+ }
+
+ info := fieldInfo{Num: i}
+
+ tag := field.Tag.Get("yaml")
+ if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+ tag = string(field.Tag)
+ }
+ if tag == "-" {
+ continue
+ }
+
+ inline := false
+ fields := strings.Split(tag, ",")
+ if len(fields) > 1 {
+ for _, flag := range fields[1:] {
+ switch flag {
+ case "omitempty":
+ info.OmitEmpty = true
+ case "flow":
+ info.Flow = true
+ case "inline":
+ inline = true
+ default:
+ return nil, errors.New(fmt.Sprintf("unsupported flag %q in tag %q of type %s", flag, tag, st))
+ }
+ }
+ tag = fields[0]
+ }
+
+ if inline {
+ switch field.Type.Kind() {
+ case reflect.Map:
+ if inlineMap >= 0 {
+ return nil, errors.New("multiple ,inline maps in struct " + st.String())
+ }
+ if field.Type.Key() != reflect.TypeOf("") {
+ return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String())
+ }
+ inlineMap = info.Num
+ case reflect.Struct, reflect.Ptr:
+ ftype := field.Type
+ for ftype.Kind() == reflect.Ptr {
+ ftype = ftype.Elem()
+ }
+ if ftype.Kind() != reflect.Struct {
+ return nil, errors.New("option ,inline may only be used on a struct or map field")
+ }
+ if reflect.PtrTo(ftype).Implements(unmarshalerType) {
+ inlineUnmarshalers = append(inlineUnmarshalers, []int{i})
+ } else {
+ sinfo, err := getStructInfo(ftype)
+ if err != nil {
+ return nil, err
+ }
+ for _, index := range sinfo.InlineUnmarshalers {
+ inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...))
+ }
+ for _, finfo := range sinfo.FieldsList {
+ if _, found := fieldsMap[finfo.Key]; found {
+ msg := "duplicated key '" + finfo.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+ if finfo.Inline == nil {
+ finfo.Inline = []int{i, finfo.Num}
+ } else {
+ finfo.Inline = append([]int{i}, finfo.Inline...)
+ }
+ finfo.Id = len(fieldsList)
+ fieldsMap[finfo.Key] = finfo
+ fieldsList = append(fieldsList, finfo)
+ }
+ }
+ default:
+ return nil, errors.New("option ,inline may only be used on a struct or map field")
+ }
+ continue
+ }
+
+ if tag != "" {
+ info.Key = tag
+ } else {
+ info.Key = strings.ToLower(field.Name)
+ }
+
+ if _, found = fieldsMap[info.Key]; found {
+ msg := "duplicated key '" + info.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+
+ info.Id = len(fieldsList)
+ fieldsList = append(fieldsList, info)
+ fieldsMap[info.Key] = info
+ }
+
+ sinfo = &structInfo{
+ FieldsMap: fieldsMap,
+ FieldsList: fieldsList,
+ InlineMap: inlineMap,
+ InlineUnmarshalers: inlineUnmarshalers,
+ }
+
+ fieldMapMutex.Lock()
+ structMap[st] = sinfo
+ fieldMapMutex.Unlock()
+ return sinfo, nil
+}
+
+// IsZeroer is used to check whether an object is zero to
+// determine whether it should be omitted when marshaling
+// with the omitempty flag. One notable implementation
+// is time.Time.
+type IsZeroer interface {
+ IsZero() bool
+}
+
+func isZero(v reflect.Value) bool {
+ kind := v.Kind()
+ if z, ok := v.Interface().(IsZeroer); ok {
+ if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
+ return true
+ }
+ return z.IsZero()
+ }
+ switch kind {
+ case reflect.String:
+ return len(v.String()) == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ case reflect.Slice:
+ return v.Len() == 0
+ case reflect.Map:
+ return v.Len() == 0
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Struct:
+ vt := v.Type()
+ for i := v.NumField() - 1; i >= 0; i-- {
+ if vt.Field(i).PkgPath != "" {
+ continue // Private field
+ }
+ if !isZero(v.Field(i)) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
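
A hedged sketch of the omitempty/IsZeroer interaction implemented above, using time.Time, which provides an IsZero method; the Job type is hypothetical.

package main

import (
	"fmt"
	"time"

	yaml "go.yaml.in/yaml/v3"
)

type Job struct {
	Name     string    `yaml:"name"`
	Deadline time.Time `yaml:"deadline,omitempty"` // time.Time implements IsZero
}

func main() {
	out, err := yaml.Marshal(Job{Name: "cleanup"})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// name: cleanup
	// (deadline is omitted because its IsZero method reports true)
}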
diff --git a/vendor/go.yaml.in/yaml/v3/yamlh.go b/vendor/go.yaml.in/yaml/v3/yamlh.go
new file mode 100644
index 000000000..f59aa40f6
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/yamlh.go
@@ -0,0 +1,811 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "fmt"
+ "io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+ major int8 // The major version number.
+ minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+ handle []byte // The tag handle.
+ prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+ // Let the parser choose the encoding.
+ yaml_ANY_ENCODING yaml_encoding_t = iota
+
+ yaml_UTF8_ENCODING // The default UTF-8 encoding.
+ yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+ yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+ // Let the parser choose the break type.
+ yaml_ANY_BREAK yaml_break_t = iota
+
+ yaml_CR_BREAK // Use CR for line breaks (Mac style).
+ yaml_LN_BREAK // Use LN for line breaks (Unix style).
+ yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+ // No error is produced.
+ yaml_NO_ERROR yaml_error_type_t = iota
+
+ yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
+ yaml_READER_ERROR // Cannot read or decode the input stream.
+ yaml_SCANNER_ERROR // Cannot scan the input stream.
+ yaml_PARSER_ERROR // Cannot parse the input stream.
+ yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+ yaml_WRITER_ERROR // Cannot write to the output stream.
+ yaml_EMITTER_ERROR // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+ index int // The position index.
+ line int // The position line.
+ column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0
+
+ yaml_PLAIN_SCALAR_STYLE yaml_scalar_style_t = 1 << iota // The plain scalar style.
+ yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+ yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+ yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
+ yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+ yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+ yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+ yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+ yaml_FLOW_MAPPING_STYLE // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+ // An empty token.
+ yaml_NO_TOKEN yaml_token_type_t = iota
+
+ yaml_STREAM_START_TOKEN // A STREAM-START token.
+ yaml_STREAM_END_TOKEN // A STREAM-END token.
+
+ yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+ yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
+ yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
+ yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
+
+ yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+ yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
+ yaml_BLOCK_END_TOKEN // A BLOCK-END token.
+
+ yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+ yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
+ yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
+ yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
+
+ yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+ yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
+ yaml_KEY_TOKEN // A KEY token.
+ yaml_VALUE_TOKEN // A VALUE token.
+
+ yaml_ALIAS_TOKEN // An ALIAS token.
+ yaml_ANCHOR_TOKEN // An ANCHOR token.
+ yaml_TAG_TOKEN // A TAG token.
+ yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+ switch tt {
+ case yaml_NO_TOKEN:
+ return "yaml_NO_TOKEN"
+ case yaml_STREAM_START_TOKEN:
+ return "yaml_STREAM_START_TOKEN"
+ case yaml_STREAM_END_TOKEN:
+ return "yaml_STREAM_END_TOKEN"
+ case yaml_VERSION_DIRECTIVE_TOKEN:
+ return "yaml_VERSION_DIRECTIVE_TOKEN"
+ case yaml_TAG_DIRECTIVE_TOKEN:
+ return "yaml_TAG_DIRECTIVE_TOKEN"
+ case yaml_DOCUMENT_START_TOKEN:
+ return "yaml_DOCUMENT_START_TOKEN"
+ case yaml_DOCUMENT_END_TOKEN:
+ return "yaml_DOCUMENT_END_TOKEN"
+ case yaml_BLOCK_SEQUENCE_START_TOKEN:
+ return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+ case yaml_BLOCK_MAPPING_START_TOKEN:
+ return "yaml_BLOCK_MAPPING_START_TOKEN"
+ case yaml_BLOCK_END_TOKEN:
+ return "yaml_BLOCK_END_TOKEN"
+ case yaml_FLOW_SEQUENCE_START_TOKEN:
+ return "yaml_FLOW_SEQUENCE_START_TOKEN"
+ case yaml_FLOW_SEQUENCE_END_TOKEN:
+ return "yaml_FLOW_SEQUENCE_END_TOKEN"
+ case yaml_FLOW_MAPPING_START_TOKEN:
+ return "yaml_FLOW_MAPPING_START_TOKEN"
+ case yaml_FLOW_MAPPING_END_TOKEN:
+ return "yaml_FLOW_MAPPING_END_TOKEN"
+ case yaml_BLOCK_ENTRY_TOKEN:
+ return "yaml_BLOCK_ENTRY_TOKEN"
+ case yaml_FLOW_ENTRY_TOKEN:
+ return "yaml_FLOW_ENTRY_TOKEN"
+ case yaml_KEY_TOKEN:
+ return "yaml_KEY_TOKEN"
+ case yaml_VALUE_TOKEN:
+ return "yaml_VALUE_TOKEN"
+ case yaml_ALIAS_TOKEN:
+ return "yaml_ALIAS_TOKEN"
+ case yaml_ANCHOR_TOKEN:
+ return "yaml_ANCHOR_TOKEN"
+ case yaml_TAG_TOKEN:
+ return "yaml_TAG_TOKEN"
+ case yaml_SCALAR_TOKEN:
+ return "yaml_SCALAR_TOKEN"
+ }
+ return ""
+}
+
+// The token structure.
+type yaml_token_t struct {
+ // The token type.
+ typ yaml_token_type_t
+
+ // The start/end of the token.
+ start_mark, end_mark yaml_mark_t
+
+ // The stream encoding (for yaml_STREAM_START_TOKEN).
+ encoding yaml_encoding_t
+
+ // The alias/anchor/scalar value or tag/tag directive handle
+ // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+ value []byte
+
+ // The tag suffix (for yaml_TAG_TOKEN).
+ suffix []byte
+
+ // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+ prefix []byte
+
+ // The scalar style (for yaml_SCALAR_TOKEN).
+ style yaml_scalar_style_t
+
+ // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+ major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+ // An empty event.
+ yaml_NO_EVENT yaml_event_type_t = iota
+
+ yaml_STREAM_START_EVENT // A STREAM-START event.
+ yaml_STREAM_END_EVENT // A STREAM-END event.
+ yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+ yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
+ yaml_ALIAS_EVENT // An ALIAS event.
+ yaml_SCALAR_EVENT // A SCALAR event.
+ yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+ yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
+ yaml_MAPPING_START_EVENT // A MAPPING-START event.
+ yaml_MAPPING_END_EVENT // A MAPPING-END event.
+ yaml_TAIL_COMMENT_EVENT
+)
+
+var eventStrings = []string{
+ yaml_NO_EVENT: "none",
+ yaml_STREAM_START_EVENT: "stream start",
+ yaml_STREAM_END_EVENT: "stream end",
+ yaml_DOCUMENT_START_EVENT: "document start",
+ yaml_DOCUMENT_END_EVENT: "document end",
+ yaml_ALIAS_EVENT: "alias",
+ yaml_SCALAR_EVENT: "scalar",
+ yaml_SEQUENCE_START_EVENT: "sequence start",
+ yaml_SEQUENCE_END_EVENT: "sequence end",
+ yaml_MAPPING_START_EVENT: "mapping start",
+ yaml_MAPPING_END_EVENT: "mapping end",
+ yaml_TAIL_COMMENT_EVENT: "tail comment",
+}
+
+func (e yaml_event_type_t) String() string {
+ if e < 0 || int(e) >= len(eventStrings) {
+ return fmt.Sprintf("unknown event %d", e)
+ }
+ return eventStrings[e]
+}
+
+// The event structure.
+type yaml_event_t struct {
+
+ // The event type.
+ typ yaml_event_type_t
+
+ // The start and end of the event.
+ start_mark, end_mark yaml_mark_t
+
+ // The document encoding (for yaml_STREAM_START_EVENT).
+ encoding yaml_encoding_t
+
+ // The version directive (for yaml_DOCUMENT_START_EVENT).
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives (for yaml_DOCUMENT_START_EVENT).
+ tag_directives []yaml_tag_directive_t
+
+ // The comments
+ head_comment []byte
+ line_comment []byte
+ foot_comment []byte
+ tail_comment []byte
+
+ // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
+ anchor []byte
+
+ // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ tag []byte
+
+ // The scalar value (for yaml_SCALAR_EVENT).
+ value []byte
+
+ // Is the document start/end indicator implicit, or the tag optional?
+ // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
+ implicit bool
+
+ // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
+ quoted_implicit bool
+
+ // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ style yaml_style_t
+}
+
+func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
+func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
+func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
+
+// Nodes
+
+const (
+ yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
+ yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
+ yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
+ yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
+ yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
+ yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
+
+ yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
+ yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
+
+ // Not in original libyaml.
+ yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
+ yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
+
+ yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
+ yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
+ yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
+)
+
+type yaml_node_type_t int
+
+// Node types.
+const (
+ // An empty node.
+ yaml_NO_NODE yaml_node_type_t = iota
+
+ yaml_SCALAR_NODE // A scalar node.
+ yaml_SEQUENCE_NODE // A sequence node.
+ yaml_MAPPING_NODE // A mapping node.
+)
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+ key int // The key of the element.
+ value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+ typ yaml_node_type_t // The node type.
+ tag []byte // The node tag.
+
+ // The node data.
+
+ // The scalar parameters (for yaml_SCALAR_NODE).
+ scalar struct {
+ value []byte // The scalar value.
+ length int // The length of the scalar value.
+ style yaml_scalar_style_t // The scalar style.
+ }
+
+ // The sequence parameters (for YAML_SEQUENCE_NODE).
+ sequence struct {
+ items_data []yaml_node_item_t // The stack of sequence items.
+ style yaml_sequence_style_t // The sequence style.
+ }
+
+ // The mapping parameters (for yaml_MAPPING_NODE).
+ mapping struct {
+ pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
+ pairs_start *yaml_node_pair_t // The beginning of the stack.
+ pairs_end *yaml_node_pair_t // The end of the stack.
+ pairs_top *yaml_node_pair_t // The top of the stack.
+ style yaml_mapping_style_t // The mapping style.
+ }
+
+ start_mark yaml_mark_t // The beginning of the node.
+ end_mark yaml_mark_t // The end of the node.
+
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+ // The document nodes.
+ nodes []yaml_node_t
+
+ // The version directive.
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives.
+ tag_directives_data []yaml_tag_directive_t
+ tag_directives_start int // The beginning of the tag directives list.
+ tag_directives_end int // The end of the tag directives list.
+
+ start_implicit int // Is the document start indicator implicit?
+ end_implicit int // Is the document end indicator implicit?
+
+ // The start/end of the document.
+ start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from the
+// source. The handler should write not more than size bytes to the buffer.
+// The number of written bytes should be set to the size_read variable.
+//
+// [in,out] data A pointer to an application data specified by
+//
+// yaml_parser_set_input().
+//
+// [out] buffer The buffer to write the data from the source.
+// [in] size The size of the buffer.
+// [out] size_read The actual number of bytes read from the source.
+//
+// On success, the handler should return 1. If the handler failed,
+// the returned value should be 0. On EOF, the handler should set the
+// size_read to 0 and return 1.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+ possible bool // Is a simple key possible?
+ required bool // Is a simple key required?
+ token_number int // The number of the token.
+ mark yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+ yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+ yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
+ yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
+ yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
+ yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+ yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
+ yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
+ yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
+ yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
+ yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
+ yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
+ yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+ yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
+ yaml_PARSE_END_STATE // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+ switch ps {
+ case yaml_PARSE_STREAM_START_STATE:
+ return "yaml_PARSE_STREAM_START_STATE"
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return "yaml_PARSE_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return "yaml_PARSE_DOCUMENT_END_STATE"
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_STATE"
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return "yaml_PARSE_FLOW_NODE_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+ case yaml_PARSE_END_STATE:
+ return "yaml_PARSE_END_STATE"
+ }
+ return ""
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+ anchor []byte // The anchor.
+ index int // The node id.
+ mark yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+
+ problem string // Error description.
+
+ // The byte about which the problem occurred.
+ problem_offset int
+ problem_value int
+ problem_mark yaml_mark_t
+
+ // The error context.
+ context string
+ context_mark yaml_mark_t
+
+ // Reader stuff
+
+ read_handler yaml_read_handler_t // Read handler.
+
+ input_reader io.Reader // File input data.
+ input []byte // String input data.
+ input_pos int
+
+ eof bool // EOF flag
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ unread int // The number of unread characters in the buffer.
+
+ newlines int // The number of line breaks since last non-break/non-blank character
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The input encoding.
+
+ offset int // The offset of the current position (in bytes).
+ mark yaml_mark_t // The mark of the current position.
+
+ // Comments
+
+ head_comment []byte // The current head comments
+ line_comment []byte // The current line comments
+ foot_comment []byte // The current foot comments
+ tail_comment []byte // Foot comment that happens at the end of a block.
+ stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc)
+
+ comments []yaml_comment_t // The folded comments for all parsed tokens
+ comments_head int
+
+ // Scanner stuff
+
+ stream_start_produced bool // Have we started to scan the input stream?
+ stream_end_produced bool // Have we reached the end of the input stream?
+
+ flow_level int // The number of unclosed '[' and '{' indicators.
+
+ tokens []yaml_token_t // The tokens queue.
+ tokens_head int // The head of the tokens queue.
+ tokens_parsed int // The number of tokens fetched from the queue.
+ token_available bool // Does the tokens queue contain a token ready for dequeueing.
+
+ indent int // The current indentation level.
+ indents []int // The indentation levels stack.
+
+ simple_key_allowed bool // May a simple key occur at the current position?
+ simple_keys []yaml_simple_key_t // The stack of simple keys.
+ simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number
+
+ // Parser stuff
+
+ state yaml_parser_state_t // The current parser state.
+ states []yaml_parser_state_t // The parser states stack.
+ marks []yaml_mark_t // The stack of marks.
+ tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+ // Dumper stuff
+
+ aliases []yaml_alias_data_t // The alias data.
+
+ document *yaml_document_t // The currently parsed document.
+}
+
+type yaml_comment_t struct {
+ scan_mark yaml_mark_t // Position where scanning for comments started
+ token_mark yaml_mark_t // Position after which tokens will be associated with this comment
+ start_mark yaml_mark_t // Position of '#' comment mark
+ end_mark yaml_mark_t // Position where comment terminated
+
+ head []byte
+ line []byte
+ foot []byte
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the accumulated
+// characters to the output. The handler should write @a size bytes of the
+// @a buffer to the output.
+//
+// @param[in,out] data    A pointer to application data specified by
+//                        yaml_emitter_set_output().
+//
+// @param[in] buffer The buffer with bytes to be written.
+// @param[in] size The size of the buffer.
+//
+// @returns On success, the handler should return @c 1. If the handler failed,
+// the returned value should be @c 0.
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+ // Expect STREAM-START.
+ yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+ yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
+ yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE // Expect the next item of a flow sequence, with the comma already written out
+ yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
+ yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE // Expect the next key of a flow mapping, with the comma already written out
+ yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
+ yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
+ yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
+ yaml_EMIT_END_STATE // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the @c yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+ problem string // Error description.
+
+ // Writer stuff
+
+ write_handler yaml_write_handler_t // Write handler.
+
+ output_buffer *[]byte // String output data.
+ output_writer io.Writer // File output data.
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The stream encoding.
+
+ // Emitter stuff
+
+ canonical bool // If the output is in the canonical style?
+ best_indent int // The number of indentation spaces.
+ best_width int // The preferred width of the output lines.
+ unicode bool // Allow unescaped non-ASCII characters?
+ line_break yaml_break_t // The preferred line break.
+
+ state yaml_emitter_state_t // The current emitter state.
+ states []yaml_emitter_state_t // The stack of states.
+
+ events []yaml_event_t // The event queue.
+ events_head int // The head of the event queue.
+
+ indents []int // The stack of indentation levels.
+
+ tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+ indent int // The current indentation level.
+
+	compact_sequence_indent bool // Is '- ' considered part of the indentation for sequence elements?
+
+ flow_level int // The current flow level.
+
+ root_context bool // Is it the document root context?
+ sequence_context bool // Is it a sequence context?
+ mapping_context bool // Is it a mapping context?
+ simple_key_context bool // Is it a simple mapping key context?
+
+ line int // The current line.
+ column int // The current column.
+ whitespace bool // If the last character was a whitespace?
+ indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
+ open_ended bool // If an explicit document end is required?
+
+	space_above bool // Is there an empty line above?
+ foot_indent int // The indent used to write the foot comment above, or -1 if none.
+
+ // Anchor analysis.
+ anchor_data struct {
+ anchor []byte // The anchor value.
+ alias bool // Is it an alias?
+ }
+
+ // Tag analysis.
+ tag_data struct {
+ handle []byte // The tag handle.
+ suffix []byte // The tag suffix.
+ }
+
+ // Scalar analysis.
+ scalar_data struct {
+ value []byte // The scalar value.
+ multiline bool // Does the scalar contain line breaks?
+		flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
+ block_plain_allowed bool // Can the scalar be expressed in the block plain style?
+ single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+ block_allowed bool // Can the scalar be expressed in the literal or folded styles?
+ style yaml_scalar_style_t // The output style.
+ }
+
+ // Comments
+ head_comment []byte
+ line_comment []byte
+ foot_comment []byte
+ tail_comment []byte
+
+ key_line_comment []byte
+
+ // Dumper stuff
+
+ opened bool // If the stream was already opened?
+ closed bool // If the stream was already closed?
+
+ // The information associated with the document nodes.
+ anchors *struct {
+ references int // The number of references.
+ anchor int // The anchor id.
+ serialized bool // If the node has been emitted?
+ }
+
+ last_anchor_id int // The last assigned anchor id.
+
+ document *yaml_document_t // The currently emitted document.
+}
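
The read-handler contract documented in the parser definitions above comes from the C API's size/size_read convention; this Go port adapts it to the (n int, err error) convention visible in the yaml_read_handler_t signature. A minimal, self-contained sketch of such a handler backed by an io.Reader follows; the parser type here is an illustrative stand-in for yaml_parser_t, not the vendored code.

package main

import (
	"io"
	"strings"
)

// parser mirrors only the field the read handler needs for this sketch; it is
// a stand-in for the yaml_parser_t structure defined above.
type parser struct {
	input_reader io.Reader
}

// readHandler follows the documented contract: fill buffer with up to
// len(buffer) bytes from the source and report how many bytes were written.
// In the Go convention, io.EOF (rather than a zero size_read) marks the end
// of the input stream.
func readHandler(p *parser, buffer []byte) (int, error) {
	return p.input_reader.Read(buffer)
}

func main() {
	p := &parser{input_reader: strings.NewReader("key: value\n")}
	buf := make([]byte, 8)
	for {
		n, err := readHandler(p, buf)
		_ = buf[:n] // the real parser appends these bytes to its raw buffer
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
	}
}
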
diff --git a/vendor/go.yaml.in/yaml/v3/yamlprivateh.go b/vendor/go.yaml.in/yaml/v3/yamlprivateh.go
new file mode 100644
index 000000000..dea1ba961
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/yamlprivateh.go
@@ -0,0 +1,198 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+const (
+ // The size of the input raw buffer.
+ input_raw_buffer_size = 512
+
+ // The size of the input buffer.
+ // It should be possible to decode the whole raw buffer.
+ input_buffer_size = input_raw_buffer_size * 3
+
+ // The size of the output buffer.
+ output_buffer_size = 128
+
+ // The size of the output raw buffer.
+ // It should be possible to encode the whole output buffer.
+ output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+ // The size of other stacks and queues.
+ initial_stack_size = 16
+ initial_queue_size = 16
+ initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+ return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+ bi := b[i]
+ if bi >= 'A' && bi <= 'F' {
+ return int(bi) - 'A' + 10
+ }
+ if bi >= 'a' && bi <= 'f' {
+ return int(bi) - 'a' + 10
+ }
+ return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+ return b[i] <= 0x7F
+}
+
+// Check if the character at the start of the buffer can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+ return ((b[i] == 0x0A) || // . == #x0A
+ (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+ (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
+ (b[i] > 0xC2 && b[i] < 0xED) ||
+ (b[i] == 0xED && b[i+1] < 0xA0) ||
+ (b[i] == 0xEE) ||
+ (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+ !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+ !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+ return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
+func is_bom(b []byte, i int) bool {
+ return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+ return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+ return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+ //return is_space(b, i) || is_tab(b, i)
+ return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+ return (b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+func is_crlf(b []byte, i int) bool {
+ return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool {
+ //return is_break(b, i) || is_z(b, i)
+ return (
+ // is_break:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ // is_z:
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, or NUL.
+func is_spacez(b []byte, i int) bool {
+ //return is_space(b, i) || is_breakz(b, i)
+ return (
+ // is_space:
+ b[i] == ' ' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, tab, or NUL.
+func is_blankz(b []byte, i int) bool {
+ //return is_blank(b, i) || is_breakz(b, i)
+ return (
+ // is_blank:
+ b[i] == ' ' || b[i] == '\t' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Determine the width of the character.
+func width(b byte) int {
+ // Don't replace these by a switch without first
+ // confirming that it is being inlined.
+ if b&0x80 == 0x00 {
+ return 1
+ }
+ if b&0xE0 == 0xC0 {
+ return 2
+ }
+ if b&0xF0 == 0xE0 {
+ return 3
+ }
+ if b&0xF8 == 0xF0 {
+ return 4
+ }
+ return 0
+
+}
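
The helpers above classify single bytes, and width reports how many bytes the current UTF-8 character occupies based on its leading byte. A standalone sketch of the same leading-byte test, used to walk a buffer one character at a time (illustrative only, not part of the vendored file):

package main

import "fmt"

// utf8Width mirrors the leading-byte test used by width() above: it returns
// 1 to 4 for a valid leading byte and 0 for a continuation or invalid byte.
func utf8Width(b byte) int {
	switch {
	case b&0x80 == 0x00:
		return 1
	case b&0xE0 == 0xC0:
		return 2
	case b&0xF0 == 0xE0:
		return 3
	case b&0xF8 == 0xF0:
		return 4
	}
	return 0
}

func main() {
	buf := []byte("a\u00e9\u2028") // 1-, 2-, and 3-byte encodings
	for i := 0; i < len(buf); {
		w := utf8Width(buf[i])
		if w == 0 {
			panic("not a leading byte")
		}
		fmt.Printf("%q uses %d byte(s)\n", string(buf[i:i+w]), w)
		i += w
	}
}
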
diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go
index 382ad6941..c9b343c71 100644
--- a/vendor/google.golang.org/grpc/balancer/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/balancer.go
@@ -129,6 +129,13 @@ type State struct {
// brand new implementation of this interface. For the situations like
// testing, the new implementation should embed this interface. This allows
// gRPC to add new methods to this interface.
+//
+// NOTICE: This interface is intended to be implemented by gRPC, or intercepted
+// by custom load balancing policies. Users should not need their own complete
+// implementation of this interface -- they should always delegate to a
+// ClientConn passed to Builder.Build() by embedding it in their
+// implementations. An embedded ClientConn must never be nil, or runtime panics
+// will occur.
type ClientConn interface {
// NewSubConn is called by balancer to create a new SubConn.
// It doesn't block and wait for the connections to be established.
@@ -167,6 +174,17 @@ type ClientConn interface {
//
// Deprecated: Use the Target field in the BuildOptions instead.
Target() string
+
+ // MetricsRecorder provides the metrics recorder that balancers can use to
+ // record metrics. Balancer implementations that do not register metrics with
+ // the metrics registry and record to them can ignore this method. The
+ // returned MetricsRecorder is guaranteed to never be nil.
+ MetricsRecorder() estats.MetricsRecorder
+
+ // EnforceClientConnEmbedding is included to force implementers to embed
+ // another implementation of this interface, allowing gRPC to add methods
+ // without breaking users.
+ internal.EnforceClientConnEmbedding
}
// BuildOptions contains additional information for Build.
@@ -198,10 +216,6 @@ type BuildOptions struct {
// same resolver.Target as passed to the resolver. See the documentation for
// the resolver.Target type for details about what it contains.
Target resolver.Target
- // MetricsRecorder is the metrics recorder that balancers can use to record
- // metrics. Balancer implementations which do not register metrics on
- // metrics registry and record on them can ignore this field.
- MetricsRecorder estats.MetricsRecorder
}
// Builder creates a balancer.
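
Two API shifts in the hunks above matter for custom policies: ClientConn implementations must now embed another ClientConn (via EnforceClientConnEmbedding), and the metrics recorder is obtained from the ClientConn rather than from BuildOptions. A hedged sketch of how a wrapping policy typically accommodates both; loggingCC and buildWrapped are illustrative names, only balancer.ClientConn, balancer.State, and MetricsRecorder come from gRPC:

package example

import "google.golang.org/grpc/balancer"

// loggingCC embeds the ClientConn handed to Builder.Build, as the NOTICE above
// requires, so any methods gRPC adds later are inherited automatically. Only
// UpdateState is intercepted here; MetricsRecorder and everything else fall
// through to the embedded implementation.
type loggingCC struct {
	balancer.ClientConn
}

func (c *loggingCC) UpdateState(s balancer.State) {
	// Inspect or rewrite the state before delegating to gRPC.
	c.ClientConn.UpdateState(s)
}

// buildWrapped sketches a Build method body that pulls the metrics recorder
// from the ClientConn (no longer from BuildOptions) and passes the wrapper
// down to a child policy built by childBuilder.
func buildWrapped(cc balancer.ClientConn, opts balancer.BuildOptions, childBuilder balancer.Builder) balancer.Balancer {
	recorder := cc.MetricsRecorder() // guaranteed non-nil per the docs above
	_ = recorder
	return childBuilder.Build(&loggingCC{ClientConn: cc}, opts)
}
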
diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go
index d5ed172ae..4d576876d 100644
--- a/vendor/google.golang.org/grpc/balancer/base/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go
@@ -41,7 +41,7 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) ba
cc: cc,
pickerBuilder: bb.pickerBuilder,
- subConns: resolver.NewAddressMap(),
+ subConns: resolver.NewAddressMapV2[balancer.SubConn](),
scStates: make(map[balancer.SubConn]connectivity.State),
csEvltr: &balancer.ConnectivityStateEvaluator{},
config: bb.config,
@@ -65,7 +65,7 @@ type baseBalancer struct {
csEvltr *balancer.ConnectivityStateEvaluator
state connectivity.State
- subConns *resolver.AddressMap
+ subConns *resolver.AddressMapV2[balancer.SubConn]
scStates map[balancer.SubConn]connectivity.State
picker balancer.Picker
config Config
@@ -100,7 +100,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
// Successful resolution; clear resolver error and ensure we return nil.
b.resolverErr = nil
// addrsSet is the set converted from addrs, it's used for quick lookup of an address.
- addrsSet := resolver.NewAddressMap()
+ addrsSet := resolver.NewAddressMapV2[any]()
for _, a := range s.ResolverState.Addresses {
addrsSet.Set(a, nil)
if _, ok := b.subConns.Get(a); !ok {
@@ -122,8 +122,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
}
}
for _, a := range b.subConns.Keys() {
- sci, _ := b.subConns.Get(a)
- sc := sci.(balancer.SubConn)
+ sc, _ := b.subConns.Get(a)
// a was removed by resolver.
if _, ok := addrsSet.Get(a); !ok {
sc.Shutdown()
@@ -173,8 +172,7 @@ func (b *baseBalancer) regeneratePicker() {
// Filter out all ready SCs from full subConn map.
for _, addr := range b.subConns.Keys() {
- sci, _ := b.subConns.Get(addr)
- sc := sci.(balancer.SubConn)
+ sc, _ := b.subConns.Get(addr)
if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
readySCs[sc] = SubConnInfo{Address: addr}
}
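
The base balancer hunks above migrate from the untyped AddressMap to the generic AddressMapV2, which is what eliminates the sci.(balancer.SubConn) assertions. A small sketch of the typed-access pattern those hunks rely on (the readySubConns helper is illustrative):

package example

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/resolver"
)

// readySubConns demonstrates typed lookups with AddressMapV2: Get returns a
// balancer.SubConn directly, so no type assertion is needed.
func readySubConns(addrs []resolver.Address, sc balancer.SubConn) []balancer.SubConn {
	m := resolver.NewAddressMapV2[balancer.SubConn]()
	for _, a := range addrs {
		m.Set(a, sc)
	}
	out := []balancer.SubConn{}
	for _, a := range m.Keys() {
		if v, ok := m.Get(a); ok {
			out = append(out, v) // v is already a balancer.SubConn
		}
	}
	return out
}
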
diff --git a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go
new file mode 100644
index 000000000..cc606f4da
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go
@@ -0,0 +1,356 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package endpointsharding implements a load balancing policy that manages
+// homogeneous child policies each owning a single endpoint.
+//
+// # Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
+package endpointsharding
+
+import (
+ "errors"
+ rand "math/rand/v2"
+ "sync"
+ "sync/atomic"
+
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/balancer/base"
+ "google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/resolver"
+)
+
+// ChildState is the balancer state of a child along with the endpoint which
+// identifies the child balancer.
+type ChildState struct {
+ Endpoint resolver.Endpoint
+ State balancer.State
+
+ // Balancer exposes only the ExitIdler interface of the child LB policy.
+ // Other methods of the child policy are called only by endpointsharding.
+ Balancer balancer.ExitIdler
+}
+
+// Options are the options to configure the behaviour of the
+// endpointsharding balancer.
+type Options struct {
+ // DisableAutoReconnect allows the balancer to keep child balancers in the
+ // IDLE state until they are explicitly triggered to exit using the
+ // ChildState obtained from the endpointsharding picker. When set to false,
+ // the endpointsharding balancer will automatically call ExitIdle on child
+ // connections that report IDLE.
+ DisableAutoReconnect bool
+}
+
+// ChildBuilderFunc creates a new balancer with the ClientConn. It has the same
+// type as the balancer.Builder.Build method.
+type ChildBuilderFunc func(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer
+
+// NewBalancer returns a load balancing policy that manages homogeneous child
+// policies each owning a single endpoint. The endpointsharding balancer
+// forwards the LoadBalancingConfig in ClientConn state updates to its children.
+func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions, childBuilder ChildBuilderFunc, esOpts Options) balancer.Balancer {
+ es := &endpointSharding{
+ cc: cc,
+ bOpts: opts,
+ esOpts: esOpts,
+ childBuilder: childBuilder,
+ }
+ es.children.Store(resolver.NewEndpointMap[*balancerWrapper]())
+ return es
+}
+
+// endpointSharding is a balancer that wraps child balancers. It creates a child
+// balancer with child config for every unique Endpoint received. It updates the
+// child states on any update from parent or child.
+type endpointSharding struct {
+ cc balancer.ClientConn
+ bOpts balancer.BuildOptions
+ esOpts Options
+ childBuilder ChildBuilderFunc
+
+ // childMu synchronizes calls to any single child. It must be held for all
+ // calls into a child. To avoid deadlocks, do not acquire childMu while
+ // holding mu.
+ childMu sync.Mutex
+ children atomic.Pointer[resolver.EndpointMap[*balancerWrapper]]
+
+ // inhibitChildUpdates is set during UpdateClientConnState/ResolverError
+ // calls (each call into a child produces an update; only a single
+ // aggregated update should be sent).
+ inhibitChildUpdates atomic.Bool
+
+ // mu synchronizes access to the state stored in balancerWrappers in the
+ // children field. mu must not be held during calls into a child since
+ // synchronous calls back from the child may require taking mu, causing a
+ // deadlock. To avoid deadlocks, do not acquire childMu while holding mu.
+ mu sync.Mutex
+}
+
+// UpdateClientConnState creates a child for new endpoints and deletes children
+// for endpoints that are no longer present. It also updates all the children,
+// and sends a single synchronous update of the children's aggregated state at
+// the end of the UpdateClientConnState operation. If any endpoint has no
+// addresses it will ignore that endpoint. Otherwise, returns first error found
+// from a child, but fully processes the new update.
+func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState) error {
+ es.childMu.Lock()
+ defer es.childMu.Unlock()
+
+ es.inhibitChildUpdates.Store(true)
+ defer func() {
+ es.inhibitChildUpdates.Store(false)
+ es.updateState()
+ }()
+ var ret error
+
+ children := es.children.Load()
+ newChildren := resolver.NewEndpointMap[*balancerWrapper]()
+
+ // Update/Create new children.
+ for _, endpoint := range state.ResolverState.Endpoints {
+ if _, ok := newChildren.Get(endpoint); ok {
+ // Endpoint child was already created, continue to avoid duplicate
+ // update.
+ continue
+ }
+ childBalancer, ok := children.Get(endpoint)
+ if ok {
+ // Endpoint attributes may have changed, update the stored endpoint.
+ es.mu.Lock()
+ childBalancer.childState.Endpoint = endpoint
+ es.mu.Unlock()
+ } else {
+ childBalancer = &balancerWrapper{
+ childState: ChildState{Endpoint: endpoint},
+ ClientConn: es.cc,
+ es: es,
+ }
+ childBalancer.childState.Balancer = childBalancer
+ childBalancer.child = es.childBuilder(childBalancer, es.bOpts)
+ }
+ newChildren.Set(endpoint, childBalancer)
+ if err := childBalancer.updateClientConnStateLocked(balancer.ClientConnState{
+ BalancerConfig: state.BalancerConfig,
+ ResolverState: resolver.State{
+ Endpoints: []resolver.Endpoint{endpoint},
+ Attributes: state.ResolverState.Attributes,
+ },
+ }); err != nil && ret == nil {
+ // Return the first error found, but always commit full processing of
+ // the children update. Callers that need more specific errors across
+ // all endpoints should perform those validations themselves; this is
+ // a current limitation kept for simplicity's sake.
+ ret = err
+ }
+ }
+ // Delete old children that are no longer present.
+ for _, e := range children.Keys() {
+ child, _ := children.Get(e)
+ if _, ok := newChildren.Get(e); !ok {
+ child.closeLocked()
+ }
+ }
+ es.children.Store(newChildren)
+ if newChildren.Len() == 0 {
+ return balancer.ErrBadResolverState
+ }
+ return ret
+}
+
+// ResolverError forwards the resolver error to all of the endpointSharding's
+// children and sends a single synchronous update of the childStates at the end
+// of the ResolverError operation.
+func (es *endpointSharding) ResolverError(err error) {
+ es.childMu.Lock()
+ defer es.childMu.Unlock()
+ es.inhibitChildUpdates.Store(true)
+ defer func() {
+ es.inhibitChildUpdates.Store(false)
+ es.updateState()
+ }()
+ children := es.children.Load()
+ for _, child := range children.Values() {
+ child.resolverErrorLocked(err)
+ }
+}
+
+func (es *endpointSharding) UpdateSubConnState(balancer.SubConn, balancer.SubConnState) {
+ // UpdateSubConnState is deprecated.
+}
+
+func (es *endpointSharding) Close() {
+ es.childMu.Lock()
+ defer es.childMu.Unlock()
+ children := es.children.Load()
+ for _, child := range children.Values() {
+ child.closeLocked()
+ }
+}
+
+// updateState updates this component's state. It sends the aggregated state,
+// and a picker with round robin behavior with all the child states present if
+// needed.
+func (es *endpointSharding) updateState() {
+ if es.inhibitChildUpdates.Load() {
+ return
+ }
+ var readyPickers, connectingPickers, idlePickers, transientFailurePickers []balancer.Picker
+
+ es.mu.Lock()
+ defer es.mu.Unlock()
+
+ children := es.children.Load()
+ childStates := make([]ChildState, 0, children.Len())
+
+ for _, child := range children.Values() {
+ childState := child.childState
+ childStates = append(childStates, childState)
+ childPicker := childState.State.Picker
+ switch childState.State.ConnectivityState {
+ case connectivity.Ready:
+ readyPickers = append(readyPickers, childPicker)
+ case connectivity.Connecting:
+ connectingPickers = append(connectingPickers, childPicker)
+ case connectivity.Idle:
+ idlePickers = append(idlePickers, childPicker)
+ case connectivity.TransientFailure:
+ transientFailurePickers = append(transientFailurePickers, childPicker)
+ // connectivity.Shutdown shouldn't appear.
+ }
+ }
+
+ // Construct the round robin picker based off the aggregated state. Whatever
+ // the aggregated state, use the pickers present that are currently in that
+ // state only.
+ var aggState connectivity.State
+ var pickers []balancer.Picker
+ if len(readyPickers) >= 1 {
+ aggState = connectivity.Ready
+ pickers = readyPickers
+ } else if len(connectingPickers) >= 1 {
+ aggState = connectivity.Connecting
+ pickers = connectingPickers
+ } else if len(idlePickers) >= 1 {
+ aggState = connectivity.Idle
+ pickers = idlePickers
+ } else if len(transientFailurePickers) >= 1 {
+ aggState = connectivity.TransientFailure
+ pickers = transientFailurePickers
+ } else {
+ aggState = connectivity.TransientFailure
+ pickers = []balancer.Picker{base.NewErrPicker(errors.New("no children to pick from"))}
+ } // No children (resolver error before valid update).
+ p := &pickerWithChildStates{
+ pickers: pickers,
+ childStates: childStates,
+ next: uint32(rand.IntN(len(pickers))),
+ }
+ es.cc.UpdateState(balancer.State{
+ ConnectivityState: aggState,
+ Picker: p,
+ })
+}
+
+// pickerWithChildStates delegates to the pickers it holds in a round robin
+// fashion. It also contains the childStates of all the endpointSharding's
+// children.
+type pickerWithChildStates struct {
+ pickers []balancer.Picker
+ childStates []ChildState
+ next uint32
+}
+
+func (p *pickerWithChildStates) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
+ nextIndex := atomic.AddUint32(&p.next, 1)
+ picker := p.pickers[nextIndex%uint32(len(p.pickers))]
+ return picker.Pick(info)
+}
+
+// ChildStatesFromPicker returns the state of all the children managed by the
+// endpoint sharding balancer that created this picker.
+func ChildStatesFromPicker(picker balancer.Picker) []ChildState {
+ p, ok := picker.(*pickerWithChildStates)
+ if !ok {
+ return nil
+ }
+ return p.childStates
+}
+
+// balancerWrapper is a wrapper of a balancer. It ID's a child balancer by
+// endpoint, and persists recent child balancer state.
+type balancerWrapper struct {
+ // The following fields are initialized at build time and read-only after
+ // that and therefore do not need to be guarded by a mutex.
+
+ // child contains the wrapped balancer. Access its methods only through
+ // methods on balancerWrapper to ensure proper synchronization
+ child balancer.Balancer
+ balancer.ClientConn // embed to intercept UpdateState, doesn't deal with SubConns
+
+ es *endpointSharding
+
+ // Access to the following fields is guarded by es.mu.
+
+ childState ChildState
+ isClosed bool
+}
+
+func (bw *balancerWrapper) UpdateState(state balancer.State) {
+ bw.es.mu.Lock()
+ bw.childState.State = state
+ bw.es.mu.Unlock()
+ if state.ConnectivityState == connectivity.Idle && !bw.es.esOpts.DisableAutoReconnect {
+ bw.ExitIdle()
+ }
+ bw.es.updateState()
+}
+
+// ExitIdle pings an IDLE child balancer to exit idle in a new goroutine to
+// avoid deadlocks due to synchronous balancer state updates.
+func (bw *balancerWrapper) ExitIdle() {
+ if ei, ok := bw.child.(balancer.ExitIdler); ok {
+ go func() {
+ bw.es.childMu.Lock()
+ if !bw.isClosed {
+ ei.ExitIdle()
+ }
+ bw.es.childMu.Unlock()
+ }()
+ }
+}
+
+// updateClientConnStateLocked delivers the ClientConnState to the child
+// balancer. Callers must hold the child mutex of the parent endpointsharding
+// balancer.
+func (bw *balancerWrapper) updateClientConnStateLocked(ccs balancer.ClientConnState) error {
+ return bw.child.UpdateClientConnState(ccs)
+}
+
+// closeLocked closes the child balancer. Callers must hold the child mutex of
+// the parent endpointsharding balancer.
+func (bw *balancerWrapper) closeLocked() {
+ bw.child.Close()
+ bw.isClosed = true
+}
+
+func (bw *balancerWrapper) resolverErrorLocked(err error) {
+ bw.child.ResolverError(err)
+}
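
As the package documentation above describes, a parent policy passes its own ClientConn wrapper to NewBalancer and then observes per-endpoint state by unpacking the picker with ChildStatesFromPicker. A hedged sketch of that consumption pattern; parentCC is an illustrative name, the endpointsharding identifiers are the ones defined above:

package example

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/endpointsharding"
	"google.golang.org/grpc/connectivity"
)

// parentCC is the ClientConn a parent policy would pass to NewBalancer; it
// intercepts UpdateState to observe the child states carried by the picker.
type parentCC struct {
	balancer.ClientConn
}

func (p *parentCC) UpdateState(state balancer.State) {
	for _, child := range endpointsharding.ChildStatesFromPicker(state.Picker) {
		if child.State.ConnectivityState == connectivity.Idle {
			// With DisableAutoReconnect set, the parent decides when an
			// idle endpoint should reconnect.
			child.Balancer.ExitIdle()
		}
	}
	p.ClientConn.UpdateState(state)
}
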
diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
index 2fc0a71f9..494314f23 100644
--- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
+++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
@@ -54,9 +54,18 @@ func init() {
balancer.Register(pickfirstBuilder{})
}
-// enableHealthListenerKeyType is a unique key type used in resolver attributes
-// to indicate whether the health listener usage is enabled.
-type enableHealthListenerKeyType struct{}
+type (
+ // enableHealthListenerKeyType is a unique key type used in resolver
+ // attributes to indicate whether the health listener usage is enabled.
+ enableHealthListenerKeyType struct{}
+ // managedByPickfirstKeyType is an attribute key type to inform Outlier
+ // Detection that the generic health listener is being used.
+ // TODO: https://github.com/grpc/grpc-go/issues/7915 - Remove this when
+ // implementing the dualstack design. This is a hack. Once Dualstack is
+ // completed, outlier detection will stop sending ejection updates through
+ // the connectivity listener.
+ managedByPickfirstKeyType struct{}
+)
var (
logger = grpclog.Component("pick-first-leaf-lb")
@@ -111,9 +120,9 @@ func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions)
b := &pickfirstBalancer{
cc: cc,
target: bo.Target.String(),
- metricsRecorder: bo.MetricsRecorder, // ClientConn will always create a Metrics Recorder.
+ metricsRecorder: cc.MetricsRecorder(),
- subConns: resolver.NewAddressMap(),
+ subConns: resolver.NewAddressMapV2[*scData](),
state: connectivity.Connecting,
cancelConnectionTimer: func() {},
}
@@ -140,6 +149,17 @@ func EnableHealthListener(state resolver.State) resolver.State {
return state
}
+// IsManagedByPickfirst returns whether an address belongs to a SubConn
+// managed by the pickfirst LB policy.
+// TODO: https://github.com/grpc/grpc-go/issues/7915 - This is a hack to disable
+// outlier_detection via the connectivity listener when using pick_first.
+// Once Dualstack changes are complete, all SubConns will be created by
+// pick_first and outlier detection will only use the health listener for
+// ejection. This hack can then be removed.
+func IsManagedByPickfirst(addr resolver.Address) bool {
+ return addr.BalancerAttributes.Value(managedByPickfirstKeyType{}) != nil
+}
+
type pfConfig struct {
serviceconfig.LoadBalancingConfig `json:"-"`
@@ -166,6 +186,7 @@ type scData struct {
}
func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) {
+ addr.BalancerAttributes = addr.BalancerAttributes.WithValue(managedByPickfirstKeyType{}, true)
sd := &scData{
rawConnectivityState: connectivity.Idle,
effectiveState: connectivity.Idle,
@@ -199,7 +220,7 @@ type pickfirstBalancer struct {
// updates.
state connectivity.State
// scData for active subconns mapped by address.
- subConns *resolver.AddressMap
+ subConns *resolver.AddressMapV2[*scData]
addressList addressList
firstPass bool
numTF int
@@ -298,7 +319,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
prevAddr := b.addressList.currentAddress()
prevSCData, found := b.subConns.Get(prevAddr)
prevAddrsCount := b.addressList.size()
- isPrevRawConnectivityStateReady := found && prevSCData.(*scData).rawConnectivityState == connectivity.Ready
+ isPrevRawConnectivityStateReady := found && prevSCData.rawConnectivityState == connectivity.Ready
b.addressList.updateAddrs(newAddrs)
// If the previous ready SubConn exists in new address list,
@@ -360,21 +381,21 @@ func (b *pickfirstBalancer) startFirstPassLocked() {
b.numTF = 0
// Reset the connection attempt record for existing SubConns.
for _, sd := range b.subConns.Values() {
- sd.(*scData).connectionFailedInFirstPass = false
+ sd.connectionFailedInFirstPass = false
}
b.requestConnectionLocked()
}
func (b *pickfirstBalancer) closeSubConnsLocked() {
for _, sd := range b.subConns.Values() {
- sd.(*scData).subConn.Shutdown()
+ sd.subConn.Shutdown()
}
- b.subConns = resolver.NewAddressMap()
+ b.subConns = resolver.NewAddressMapV2[*scData]()
}
// deDupAddresses ensures that each address appears only once in the slice.
func deDupAddresses(addrs []resolver.Address) []resolver.Address {
- seenAddrs := resolver.NewAddressMap()
+ seenAddrs := resolver.NewAddressMapV2[*scData]()
retAddrs := []resolver.Address{}
for _, addr := range addrs {
@@ -460,7 +481,7 @@ func addressFamily(address string) ipAddrFamily {
// This ensures that the subchannel map accurately reflects the current set of
// addresses received from the name resolver.
func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) {
- newAddrsMap := resolver.NewAddressMap()
+ newAddrsMap := resolver.NewAddressMapV2[bool]()
for _, addr := range newAddrs {
newAddrsMap.Set(addr, true)
}
@@ -470,7 +491,7 @@ func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address)
continue
}
val, _ := b.subConns.Get(oldAddr)
- val.(*scData).subConn.Shutdown()
+ val.subConn.Shutdown()
b.subConns.Delete(oldAddr)
}
}
@@ -479,13 +500,12 @@ func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address)
// becomes ready, which means that all other subConn must be shutdown.
func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) {
b.cancelConnectionTimer()
- for _, v := range b.subConns.Values() {
- sd := v.(*scData)
+ for _, sd := range b.subConns.Values() {
if sd.subConn != selected.subConn {
sd.subConn.Shutdown()
}
}
- b.subConns = resolver.NewAddressMap()
+ b.subConns = resolver.NewAddressMapV2[*scData]()
b.subConns.Set(selected.addr, selected)
}
@@ -518,18 +538,17 @@ func (b *pickfirstBalancer) requestConnectionLocked() {
b.subConns.Set(curAddr, sd)
}
- scd := sd.(*scData)
- switch scd.rawConnectivityState {
+ switch sd.rawConnectivityState {
case connectivity.Idle:
- scd.subConn.Connect()
+ sd.subConn.Connect()
b.scheduleNextConnectionLocked()
return
case connectivity.TransientFailure:
// The SubConn is being re-used and failed during a previous pass
// over the addressList. It has not completed backoff yet.
// Mark it as having failed and try the next address.
- scd.connectionFailedInFirstPass = true
- lastErr = scd.lastErr
+ sd.connectionFailedInFirstPass = true
+ lastErr = sd.lastErr
continue
case connectivity.Connecting:
// Wait for the connection attempt to complete or the timer to fire
@@ -537,7 +556,7 @@ func (b *pickfirstBalancer) requestConnectionLocked() {
b.scheduleNextConnectionLocked()
return
default:
- b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", scd.rawConnectivityState)
+ b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", sd.rawConnectivityState)
return
}
@@ -732,8 +751,7 @@ func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) {
}
// Connect() has been called on all the SubConns. The first pass can be
// ended if all the SubConns have reported a failure.
- for _, v := range b.subConns.Values() {
- sd := v.(*scData)
+ for _, sd := range b.subConns.Values() {
if !sd.connectionFailedInFirstPass {
return
}
@@ -744,8 +762,7 @@ func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) {
Picker: &picker{err: lastErr},
})
// Start re-connecting all the SubConns that are already in IDLE.
- for _, v := range b.subConns.Values() {
- sd := v.(*scData)
+ for _, sd := range b.subConns.Values() {
if sd.rawConnectivityState == connectivity.Idle {
sd.subConn.Connect()
}
@@ -906,6 +923,5 @@ func (al *addressList) hasNext() bool {
// fields that are meaningful to the SubConn.
func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
return a.Addr == b.Addr && a.ServerName == b.ServerName &&
- a.Attributes.Equal(b.Attributes) &&
- a.Metadata == b.Metadata
+ a.Attributes.Equal(b.Attributes)
}
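
The IsManagedByPickfirst helper added above is the hook outlier detection is expected to use when deciding whether ejection updates should travel through the generic health listener instead of the connectivity listener. A thin illustrative sketch of consulting it (the function name is made up):

package example

import (
	"google.golang.org/grpc/balancer/pickfirst/pickfirstleaf"
	"google.golang.org/grpc/resolver"
)

// ejectViaHealthListener reports whether ejection updates for this address
// should flow through the health listener rather than the connectivity
// listener, per the attribute stamped onto addresses in newSCData above.
func ejectViaHealthListener(addr resolver.Address) bool {
	return pickfirstleaf.IsManagedByPickfirst(addr)
}
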
diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
index 80a42d225..35da5d1ec 100644
--- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
+++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
@@ -22,12 +22,13 @@
package roundrobin
import (
- rand "math/rand/v2"
- "sync/atomic"
+ "fmt"
"google.golang.org/grpc/balancer"
- "google.golang.org/grpc/balancer/base"
+ "google.golang.org/grpc/balancer/endpointsharding"
+ "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf"
"google.golang.org/grpc/grpclog"
+ internalgrpclog "google.golang.org/grpc/internal/grpclog"
)
// Name is the name of round_robin balancer.
@@ -35,47 +36,44 @@ const Name = "round_robin"
var logger = grpclog.Component("roundrobin")
-// newBuilder creates a new roundrobin balancer builder.
-func newBuilder() balancer.Builder {
- return base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
-}
-
func init() {
- balancer.Register(newBuilder())
+ balancer.Register(builder{})
}
-type rrPickerBuilder struct{}
+type builder struct{}
-func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
- logger.Infof("roundrobinPicker: Build called with info: %v", info)
- if len(info.ReadySCs) == 0 {
- return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
- }
- scs := make([]balancer.SubConn, 0, len(info.ReadySCs))
- for sc := range info.ReadySCs {
- scs = append(scs, sc)
- }
- return &rrPicker{
- subConns: scs,
- // Start at a random index, as the same RR balancer rebuilds a new
- // picker when SubConn states change, and we don't want to apply excess
- // load to the first server in the list.
- next: uint32(rand.IntN(len(scs))),
+func (bb builder) Name() string {
+ return Name
+}
+
+func (bb builder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
+ childBuilder := balancer.Get(pickfirstleaf.Name).Build
+ bal := &rrBalancer{
+ cc: cc,
+ Balancer: endpointsharding.NewBalancer(cc, opts, childBuilder, endpointsharding.Options{}),
}
+ bal.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[%p] ", bal))
+ bal.logger.Infof("Created")
+ return bal
}
-type rrPicker struct {
- // subConns is the snapshot of the roundrobin balancer when this picker was
- // created. The slice is immutable. Each Get() will do a round robin
- // selection from it and return the selected SubConn.
- subConns []balancer.SubConn
- next uint32
+type rrBalancer struct {
+ balancer.Balancer
+ cc balancer.ClientConn
+ logger *internalgrpclog.PrefixLogger
}
-func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
- subConnsLen := uint32(len(p.subConns))
- nextIndex := atomic.AddUint32(&p.next, 1)
+func (b *rrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error {
+ return b.Balancer.UpdateClientConnState(balancer.ClientConnState{
+ // Enable the health listener in pickfirst children for client side health
+ // checks and outlier detection, if configured.
+ ResolverState: pickfirstleaf.EnableHealthListener(ccs.ResolverState),
+ })
+}
- sc := p.subConns[nextIndex%subConnsLen]
- return balancer.PickResult{SubConn: sc}, nil
+func (b *rrBalancer) ExitIdle() {
+ // Should always be ok, as child is endpoint sharding.
+ if ei, ok := b.Balancer.(balancer.ExitIdler); ok {
+ ei.ExitIdle()
+ }
}
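
The rewrite above keeps the round_robin name and configuration surface unchanged; only the internals now delegate to endpointsharding with pick_first children. For orientation, a sketch of how an application still selects the policy (the helper name and the insecure credentials are illustrative choices):

package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// newRoundRobinClient opts into round_robin through the default service
// config, exactly as before this refactor.
func newRoundRobinClient(target string) (*grpc.ClientConn, error) {
	return grpc.NewClient(target,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`),
	)
}
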
diff --git a/vendor/google.golang.org/grpc/balancer/subconn.go b/vendor/google.golang.org/grpc/balancer/subconn.go
index ea27c4fa7..9ee44d4af 100644
--- a/vendor/google.golang.org/grpc/balancer/subconn.go
+++ b/vendor/google.golang.org/grpc/balancer/subconn.go
@@ -44,7 +44,7 @@ import (
// should only use a single address.
//
// NOTICE: This interface is intended to be implemented by gRPC, or intercepted
-// by custom load balancing poilices. Users should not need their own complete
+// by custom load balancing policies. Users should not need their own complete
// implementation of this interface -- they should always delegate to a SubConn
// returned by ClientConn.NewSubConn() by embedding it in their implementations.
// An embedded SubConn must never be nil, or runtime panics will occur.
diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go
index 905817b5f..948a21ef6 100644
--- a/vendor/google.golang.org/grpc/balancer_wrapper.go
+++ b/vendor/google.golang.org/grpc/balancer_wrapper.go
@@ -26,6 +26,7 @@ import (
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/experimental/stats"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/balancer/gracefulswitch"
"google.golang.org/grpc/internal/channelz"
@@ -34,7 +35,15 @@ import (
"google.golang.org/grpc/status"
)
-var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address))
+var (
+ setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address))
+ // noOpRegisterHealthListenerFn is used when client side health checking is
+ // disabled. It sends a single READY update on the registered listener.
+ noOpRegisterHealthListenerFn = func(_ context.Context, listener func(balancer.SubConnState)) func() {
+ listener(balancer.SubConnState{ConnectivityState: connectivity.Ready})
+ return func() {}
+ }
+)
// ccBalancerWrapper sits between the ClientConn and the Balancer.
//
@@ -51,6 +60,7 @@ var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnSt
// It uses the gracefulswitch.Balancer internally to ensure that balancer
// switches happen in a graceful manner.
type ccBalancerWrapper struct {
+ internal.EnforceClientConnEmbedding
// The following fields are initialized when the wrapper is created and are
// read-only afterwards, and therefore can be accessed without a mutex.
cc *ClientConn
@@ -84,7 +94,6 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper {
CustomUserAgent: cc.dopts.copts.UserAgent,
ChannelzParent: cc.channelz,
Target: cc.parsedTarget,
- MetricsRecorder: cc.metricsRecorderList,
},
serializer: grpcsync.NewCallbackSerializer(ctx),
serializerCancel: cancel,
@@ -93,6 +102,10 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper {
return ccb
}
+func (ccb *ccBalancerWrapper) MetricsRecorder() stats.MetricsRecorder {
+ return ccb.cc.metricsRecorderList
+}
+
// updateClientConnState is invoked by grpc to push a ClientConnState update to
// the underlying balancer. This is always executed from the serializer, so
// it is safe to call into the balancer here.
@@ -277,10 +290,17 @@ type healthData struct {
// to the LB policy. This is stored to avoid sending updates when the
// SubConn has already exited connectivity state READY.
connectivityState connectivity.State
+ // closeHealthProducer stores function to close the ref counted health
+ // producer. The health producer is automatically closed when the SubConn
+ // state changes.
+ closeHealthProducer func()
}
func newHealthData(s connectivity.State) *healthData {
- return &healthData{connectivityState: s}
+ return &healthData{
+ connectivityState: s,
+ closeHealthProducer: func() {},
+ }
}
// updateState is invoked by grpc to push a subConn state update to the
@@ -400,7 +420,7 @@ func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (
}
acbw.producersMu.Unlock()
}
- return pData.producer, grpcsync.OnceFunc(unref)
+ return pData.producer, sync.OnceFunc(unref)
}
func (acbw *acBalancerWrapper) closeProducers() {
@@ -413,6 +433,37 @@ func (acbw *acBalancerWrapper) closeProducers() {
}
}
+// healthProducerRegisterFn is a type alias for the health producer's function
+// for registering listeners.
+type healthProducerRegisterFn = func(context.Context, balancer.SubConn, string, func(balancer.SubConnState)) func()
+
+// healthListenerRegFn returns a function to register a listener for health
+// updates. If client side health checks are disabled, the registered listener
+// will get a single READY (raw connectivity state) update.
+//
+// Client side health checking is enabled when all the following
+// conditions are satisfied:
+// 1. Health checking is not disabled using the dial option.
+// 2. The health package is imported.
+// 3. The health check config is present in the service config.
+func (acbw *acBalancerWrapper) healthListenerRegFn() func(context.Context, func(balancer.SubConnState)) func() {
+ if acbw.ccb.cc.dopts.disableHealthCheck {
+ return noOpRegisterHealthListenerFn
+ }
+ regHealthLisFn := internal.RegisterClientHealthCheckListener
+ if regHealthLisFn == nil {
+ // The health package is not imported.
+ return noOpRegisterHealthListenerFn
+ }
+ cfg := acbw.ac.cc.healthCheckConfig()
+ if cfg == nil {
+ return noOpRegisterHealthListenerFn
+ }
+ return func(ctx context.Context, listener func(balancer.SubConnState)) func() {
+ return regHealthLisFn.(healthProducerRegisterFn)(ctx, acbw, cfg.ServiceName, listener)
+ }
+}
+
// RegisterHealthListener accepts a health listener from the LB policy. It sends
// updates to the health listener as long as the SubConn's connectivity state
// doesn't change and a new health listener is not registered. To invalidate
@@ -421,6 +472,7 @@ func (acbw *acBalancerWrapper) closeProducers() {
func (acbw *acBalancerWrapper) RegisterHealthListener(listener func(balancer.SubConnState)) {
acbw.healthMu.Lock()
defer acbw.healthMu.Unlock()
+ acbw.healthData.closeHealthProducer()
// listeners should not be registered when the connectivity state
// isn't Ready. This may happen when the balancer registers a listener
// after the connectivityState is updated, but before it is notified
@@ -436,6 +488,7 @@ func (acbw *acBalancerWrapper) RegisterHealthListener(listener func(balancer.Sub
return
}
+ registerFn := acbw.healthListenerRegFn()
acbw.ccb.serializer.TrySchedule(func(ctx context.Context) {
if ctx.Err() != nil || acbw.ccb.balancer == nil {
return
@@ -443,10 +496,25 @@ func (acbw *acBalancerWrapper) RegisterHealthListener(listener func(balancer.Sub
// Don't send updates if a new listener is registered.
acbw.healthMu.Lock()
defer acbw.healthMu.Unlock()
- curHD := acbw.healthData
- if curHD != hd {
+ if acbw.healthData != hd {
return
}
- listener(balancer.SubConnState{ConnectivityState: connectivity.Ready})
+ // Serialize the health updates from the health producer with
+ // other calls into the LB policy.
+ listenerWrapper := func(scs balancer.SubConnState) {
+ acbw.ccb.serializer.TrySchedule(func(ctx context.Context) {
+ if ctx.Err() != nil || acbw.ccb.balancer == nil {
+ return
+ }
+ acbw.healthMu.Lock()
+ defer acbw.healthMu.Unlock()
+ if acbw.healthData != hd {
+ return
+ }
+ listener(scs)
+ })
+ }
+
+ hd.closeHealthProducer = registerFn(ctx, listenerWrapper)
})
}
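
The three enablement conditions listed for healthListenerRegFn above map to the application side roughly as sketched below; the empty serviceName and the round_robin choice are illustrative values, not requirements.

package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	// Importing the health package registers the client-side health check
	// implementation (condition 2 above).
	_ "google.golang.org/grpc/health"
)

// newHealthCheckedClient leaves health checking enabled (no
// grpc.WithDisableHealthCheck dial option, condition 1) and supplies a
// healthCheckConfig in the service config (condition 3).
func newHealthCheckedClient(target string) (*grpc.ClientConn, error) {
	return grpc.NewClient(target,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(`{
			"loadBalancingConfig": [{"round_robin":{}}],
			"healthCheckConfig": {"serviceName": ""}
		}`),
	)
}
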
diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
index 9e9d08069..825c31795 100644
--- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
+++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
@@ -18,7 +18,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.5
// protoc v5.27.1
// source: grpc/binlog/v1/binarylog.proto
@@ -31,6 +31,7 @@ import (
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
)
const (
@@ -233,10 +234,7 @@ func (Address_Type) EnumDescriptor() ([]byte, []int) {
// Log entry we store in binary logs
type GrpcLogEntry struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The timestamp of the binary log message
Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
// Uniquely identifies a call. The value must not be 0 in order to disambiguate
@@ -255,7 +253,7 @@ type GrpcLogEntry struct {
// The logger uses one of the following fields to record the payload,
// according to the type of the log entry.
//
- // Types that are assignable to Payload:
+ // Types that are valid to be assigned to Payload:
//
// *GrpcLogEntry_ClientHeader
// *GrpcLogEntry_ServerHeader
@@ -269,7 +267,9 @@ type GrpcLogEntry struct {
// EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in
// the case of trailers-only. On server side, peer is always
// logged on EVENT_TYPE_CLIENT_HEADER.
- Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"`
+ Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GrpcLogEntry) Reset() {
@@ -337,37 +337,45 @@ func (x *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger {
return GrpcLogEntry_LOGGER_UNKNOWN
}
-func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload {
- if m != nil {
- return m.Payload
+func (x *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload {
+ if x != nil {
+ return x.Payload
}
return nil
}
func (x *GrpcLogEntry) GetClientHeader() *ClientHeader {
- if x, ok := x.GetPayload().(*GrpcLogEntry_ClientHeader); ok {
- return x.ClientHeader
+ if x != nil {
+ if x, ok := x.Payload.(*GrpcLogEntry_ClientHeader); ok {
+ return x.ClientHeader
+ }
}
return nil
}
func (x *GrpcLogEntry) GetServerHeader() *ServerHeader {
- if x, ok := x.GetPayload().(*GrpcLogEntry_ServerHeader); ok {
- return x.ServerHeader
+ if x != nil {
+ if x, ok := x.Payload.(*GrpcLogEntry_ServerHeader); ok {
+ return x.ServerHeader
+ }
}
return nil
}
func (x *GrpcLogEntry) GetMessage() *Message {
- if x, ok := x.GetPayload().(*GrpcLogEntry_Message); ok {
- return x.Message
+ if x != nil {
+ if x, ok := x.Payload.(*GrpcLogEntry_Message); ok {
+ return x.Message
+ }
}
return nil
}
func (x *GrpcLogEntry) GetTrailer() *Trailer {
- if x, ok := x.GetPayload().(*GrpcLogEntry_Trailer); ok {
- return x.Trailer
+ if x != nil {
+ if x, ok := x.Payload.(*GrpcLogEntry_Trailer); ok {
+ return x.Trailer
+ }
}
return nil
}
@@ -416,10 +424,7 @@ func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {}
func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {}
type ClientHeader struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// This contains only the metadata from the application.
Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
// The name of the RPC method, which looks something like:
@@ -433,7 +438,9 @@ type ClientHeader struct {
// <host> or <host>:<port> .
Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"`
// the RPC timeout
- Timeout *durationpb.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"`
+ Timeout *durationpb.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ClientHeader) Reset() {
@@ -495,12 +502,11 @@ func (x *ClientHeader) GetTimeout() *durationpb.Duration {
}
type ServerHeader struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// This contains only the metadata from the application.
- Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
+ Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ServerHeader) Reset() {
@@ -541,10 +547,7 @@ func (x *ServerHeader) GetMetadata() *Metadata {
}
type Trailer struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// This contains only the metadata from the application.
Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
// The gRPC status code.
@@ -555,6 +558,8 @@ type Trailer struct {
// The value of the 'grpc-status-details-bin' metadata key. If
// present, this is always an encoded 'google.rpc.Status' message.
StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *Trailer) Reset() {
@@ -617,15 +622,14 @@ func (x *Trailer) GetStatusDetails() []byte {
// Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE
type Message struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Length of the message. It may not be the same as the length of the
// data field, as the logging payload can be truncated or omitted.
Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"`
// May be truncated or omitted.
- Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+ Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *Message) Reset() {
@@ -694,11 +698,10 @@ func (x *Message) GetData() []byte {
// header is just a normal metadata key.
// The pair will not count towards the size limit.
type Metadata struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *Metadata) Reset() {
@@ -740,12 +743,11 @@ func (x *Metadata) GetEntry() []*MetadataEntry {
// A metadata key value pair
type MetadataEntry struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *MetadataEntry) Reset() {
@@ -794,14 +796,13 @@ func (x *MetadataEntry) GetValue() []byte {
// Address information
type Address struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"`
- Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"`
+ Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
// only for TYPE_IPV4 and TYPE_IPV6
- IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"`
+ IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *Address) Reset() {
@@ -857,7 +858,7 @@ func (x *Address) GetIpPort() uint32 {
var File_grpc_binlog_v1_binarylog_proto protoreflect.FileDescriptor
-var file_grpc_binlog_v1_binarylog_proto_rawDesc = []byte{
+var file_grpc_binlog_v1_binarylog_proto_rawDesc = string([]byte{
0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31,
0x2f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x12, 0x11, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67,
@@ -983,16 +984,16 @@ var file_grpc_binlog_v1_binarylog_proto_rawDesc = []byte{
0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x69,
0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x33,
-}
+})
var (
file_grpc_binlog_v1_binarylog_proto_rawDescOnce sync.Once
- file_grpc_binlog_v1_binarylog_proto_rawDescData = file_grpc_binlog_v1_binarylog_proto_rawDesc
+ file_grpc_binlog_v1_binarylog_proto_rawDescData []byte
)
func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte {
file_grpc_binlog_v1_binarylog_proto_rawDescOnce.Do(func() {
- file_grpc_binlog_v1_binarylog_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_binlog_v1_binarylog_proto_rawDescData)
+ file_grpc_binlog_v1_binarylog_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_grpc_binlog_v1_binarylog_proto_rawDesc), len(file_grpc_binlog_v1_binarylog_proto_rawDesc)))
})
return file_grpc_binlog_v1_binarylog_proto_rawDescData
}
@@ -1051,7 +1052,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_grpc_binlog_v1_binarylog_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_grpc_binlog_v1_binarylog_proto_rawDesc), len(file_grpc_binlog_v1_binarylog_proto_rawDesc)),
NumEnums: 3,
NumMessages: 8,
NumExtensions: 0,
@@ -1063,7 +1064,6 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
MessageInfos: file_grpc_binlog_v1_binarylog_proto_msgTypes,
}.Build()
File_grpc_binlog_v1_binarylog_proto = out.File
- file_grpc_binlog_v1_binarylog_proto_rawDesc = nil
file_grpc_binlog_v1_binarylog_proto_goTypes = nil
file_grpc_binlog_v1_binarylog_proto_depIdxs = nil
}
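
For illustration (not part of the patch): the regenerated code now keeps the raw descriptor as a string and converts it to a byte slice without copying via unsafe.StringData/unsafe.Slice (Go 1.20+). A minimal sketch of that conversion, independent of the generated code:

package main

import (
	"fmt"
	"unsafe"
)

// bytesOfString returns a []byte view of s without copying. The caller must
// not modify the returned slice, since string contents are immutable.
func bytesOfString(s string) []byte {
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

func main() {
	s := "\x0a\x1egrpc/binlog/v1/binarylog.proto" // first bytes of a raw descriptor
	b := bytesOfString(s)
	fmt.Println(len(b), b[0]) // 32 10
}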
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index 4f57b5543..4f350ca56 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -118,12 +118,26 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires
// NewClient creates a new gRPC "channel" for the target URI provided. No I/O
// is performed. Use of the ClientConn for RPCs will automatically cause it to
-// connect. Connect may be used to manually create a connection, but for most
-// users this is unnecessary.
+// connect. The Connect method may be called to manually create a connection,
+// but for most users this should be unnecessary.
//
// The target name syntax is defined in
-// https://github.com/grpc/grpc/blob/master/doc/naming.md. e.g. to use dns
-// resolver, a "dns:///" prefix should be applied to the target.
+// https://github.com/grpc/grpc/blob/master/doc/naming.md. E.g. to use the dns
+// name resolver, a "dns:///" prefix may be applied to the target. The default
+// name resolver will be used if no scheme is detected, or if the parsed scheme
+// is not a registered name resolver. The default resolver is "dns" but can be
+// overridden using the resolver package's SetDefaultScheme.
+//
+// Examples:
+//
+// - "foo.googleapis.com:8080"
+// - "dns:///foo.googleapis.com:8080"
+// - "dns:///foo.googleapis.com"
+// - "dns:///10.0.0.213:8080"
+// - "dns:///%5B2001:db8:85a3:8d3:1319:8a2e:370:7348%5D:443"
+// - "dns://8.8.8.8/foo.googleapis.com:8080"
+// - "dns://8.8.8.8/foo.googleapis.com"
+// - "zookeeper://zk.example.com:9900/example_service"
//
// The DialOptions returned by WithBlock, WithTimeout,
// WithReturnConnectionError, and FailOnNonTempDialError are ignored by this
@@ -181,7 +195,7 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error)
}
cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig)
}
- cc.mkp = cc.dopts.copts.KeepaliveParams
+ cc.keepaliveParams = cc.dopts.copts.KeepaliveParams
if err = cc.initAuthority(); err != nil {
return nil, err
@@ -225,7 +239,12 @@ func Dial(target string, opts ...DialOption) (*ClientConn, error) {
func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
// At the end of this method, we kick the channel out of idle, rather than
// waiting for the first rpc.
- opts = append([]DialOption{withDefaultScheme("passthrough")}, opts...)
+ //
+ // The WithLocalDNSResolution dial option in `grpc.Dial` preserves the
+ // pre-existing behavior: when the default "passthrough" scheme is used,
+ // hostname resolution is skipped; when the "dns" scheme is used,
+ // resolution is performed on the client.
+ opts = append([]DialOption{withDefaultScheme("passthrough"), WithLocalDNSResolution()}, opts...)
cc, err := NewClient(target, opts...)
if err != nil {
return nil, err
@@ -618,7 +637,7 @@ type ClientConn struct {
balancerWrapper *ccBalancerWrapper // Always recreated whenever entering idle to simplify Close.
sc *ServiceConfig // Latest service config received from the resolver.
conns map[*addrConn]struct{} // Set to nil on close.
- mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway.
+ keepaliveParams keepalive.ClientParameters // May be updated upon receipt of a GoAway.
// firstResolveEvent is used to track whether the name resolver sent us at
// least one update. RPCs block on this event. May be accessed without mu
// if we know we cannot be asked to enter idle mode while accessing it (e.g.
@@ -867,7 +886,13 @@ func (cc *ClientConn) Target() string {
return cc.target
}
-// CanonicalTarget returns the canonical target string of the ClientConn.
+// CanonicalTarget returns the canonical target string used when creating cc.
+//
+// This always has the form "<scheme>://[authority]/<endpoint>". For example:
+//
+// - "dns:///example.com:42"
+// - "dns://8.8.8.8/example.com:42"
+// - "unix:///path/to/socket"
func (cc *ClientConn) CanonicalTarget() string {
return cc.parsedTarget.String()
}
@@ -1206,12 +1231,11 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error)
// adjustParams updates parameters used to create transports upon
// receiving a GoAway.
func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
- switch r {
- case transport.GoAwayTooManyPings:
+ if r == transport.GoAwayTooManyPings {
v := 2 * ac.dopts.copts.KeepaliveParams.Time
ac.cc.mu.Lock()
- if v > ac.cc.mkp.Time {
- ac.cc.mkp.Time = v
+ if v > ac.cc.keepaliveParams.Time {
+ ac.cc.keepaliveParams.Time = v
}
ac.cc.mu.Unlock()
}
@@ -1307,7 +1331,7 @@ func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, c
ac.mu.Lock()
ac.cc.mu.RLock()
- ac.dopts.copts.KeepaliveParams = ac.cc.mkp
+ ac.dopts.copts.KeepaliveParams = ac.cc.keepaliveParams
ac.cc.mu.RUnlock()
copts := ac.dopts.copts
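
A usage sketch matching the documented target syntax (illustrative only; the address is a placeholder and insecure credentials are used just for the sketch). NewClient performs no I/O, so this runs without a reachable server:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// "dns:///" is explicit here; with no scheme, the default resolver
	// ("dns" unless overridden via SetDefaultScheme) would be used.
	cc, err := grpc.NewClient("dns:///example.com:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("NewClient: %v", err)
	}
	defer cc.Close()

	log.Println("canonical target:", cc.CanonicalTarget()) // dns:///example.com:50051
}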
diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go
index e163a473d..bd5fe22b6 100644
--- a/vendor/google.golang.org/grpc/credentials/tls.go
+++ b/vendor/google.golang.org/grpc/credentials/tls.go
@@ -32,6 +32,8 @@ import (
"google.golang.org/grpc/internal/envconfig"
)
+const alpnFailureHelpMessage = "If you upgraded from a grpc-go version earlier than 1.67, your TLS connections may have stopped working due to ALPN enforcement. For more details, see: https://github.com/grpc/grpc-go/issues/434"
+
var logger = grpclog.Component("credentials")
// TLSInfo contains the auth information for a TLS authenticated connection.
@@ -128,7 +130,7 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon
if np == "" {
if envconfig.EnforceALPNEnabled {
conn.Close()
- return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property")
+ return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property. %s", alpnFailureHelpMessage)
}
logger.Warningf("Allowing TLS connection to server %q with ALPN disabled. TLS connections to servers with ALPN disabled will be disallowed in future grpc-go releases", cfg.ServerName)
}
@@ -158,7 +160,7 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error)
if cs.NegotiatedProtocol == "" {
if envconfig.EnforceALPNEnabled {
conn.Close()
- return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property")
+ return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property. %s", alpnFailureHelpMessage)
} else if logger.V(2) {
logger.Info("Allowing TLS connection from client with ALPN disabled. TLS connections with ALPN disabled will be disallowed in future grpc-go releases")
}
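
For context (not part of the patch): the new help message points at ALPN enforcement. A common cause is a TLS endpoint that never negotiates "h2". For a plain Go TLS listener outside gRPC, that usually means advertising it in NextProtos; a hedged sketch with hypothetical certificate paths:

package main

import (
	"crypto/tls"
	"log"
)

func main() {
	// Hypothetical key pair; the point is NextProtos: a TLS endpoint that a
	// gRPC client connects to must negotiate "h2" via ALPN once enforcement
	// is on.
	cert, err := tls.LoadX509KeyPair("server.crt", "server.key")
	if err != nil {
		log.Fatal(err)
	}
	cfg := &tls.Config{
		Certificates: []tls.Certificate{cert},
		NextProtos:   []string{"h2"},
	}
	ln, err := tls.Listen("tcp", ":8443", cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()
}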
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
index 7494ae591..405a2ffeb 100644
--- a/vendor/google.golang.org/grpc/dialoptions.go
+++ b/vendor/google.golang.org/grpc/dialoptions.go
@@ -73,7 +73,7 @@ type dialOptions struct {
chainUnaryInts []UnaryClientInterceptor
chainStreamInts []StreamClientInterceptor
- cp Compressor
+ compressorV0 Compressor
dc Decompressor
bs internalbackoff.Strategy
block bool
@@ -94,6 +94,8 @@ type dialOptions struct {
idleTimeout time.Duration
defaultScheme string
maxCallAttempts int
+ enableLocalDNSResolution bool // Specifies if target hostnames should be resolved when proxying is enabled.
+ useProxy bool // Specifies if a server should be connected via proxy.
}
// DialOption configures how we set up the connection.
@@ -256,7 +258,7 @@ func WithCodec(c Codec) DialOption {
// Deprecated: use UseCompressor instead. Will be supported throughout 1.x.
func WithCompressor(cp Compressor) DialOption {
return newFuncDialOption(func(o *dialOptions) {
- o.cp = cp
+ o.compressorV0 = cp
})
}
@@ -377,7 +379,22 @@ func WithInsecure() DialOption {
// later release.
func WithNoProxy() DialOption {
return newFuncDialOption(func(o *dialOptions) {
- o.copts.UseProxy = false
+ o.useProxy = false
+ })
+}
+
+// WithLocalDNSResolution forces local DNS name resolution even when a proxy is
+// specified in the environment. By default, the server name is provided
+// directly to the proxy as part of the CONNECT handshake. This is ignored if
+// WithNoProxy is used.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithLocalDNSResolution() DialOption {
+ return newFuncDialOption(func(o *dialOptions) {
+ o.enableLocalDNSResolution = true
})
}
@@ -428,6 +445,11 @@ func WithTimeout(d time.Duration) DialOption {
// returned by f, gRPC checks the error's Temporary() method to decide if it
// should try to reconnect to the network address.
//
+// Note that gRPC by default performs name resolution on the target passed to
+// NewClient. To bypass name resolution and cause the target string to be
+// passed directly to the dialer here instead, use the "passthrough" resolver
+// by specifying it in the target string, e.g. "passthrough:target".
+//
// Note: All supported releases of Go (as of December 2023) override the OS
// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive
// with OS defaults for keepalive time and interval, use a net.Dialer that sets
@@ -662,14 +684,15 @@ func defaultDialOptions() dialOptions {
copts: transport.ConnectOptions{
ReadBufferSize: defaultReadBufSize,
WriteBufferSize: defaultWriteBufSize,
- UseProxy: true,
UserAgent: grpcUA,
BufferPool: mem.DefaultBufferPool(),
},
- bs: internalbackoff.DefaultExponential,
- idleTimeout: 30 * time.Minute,
- defaultScheme: "dns",
- maxCallAttempts: defaultMaxCallAttempts,
+ bs: internalbackoff.DefaultExponential,
+ idleTimeout: 30 * time.Minute,
+ defaultScheme: "dns",
+ maxCallAttempts: defaultMaxCallAttempts,
+ useProxy: true,
+ enableLocalDNSResolution: false,
}
}
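
For illustration (not part of the patch): a usage sketch of the new experimental option alongside NewClient. The target and credentials are placeholders:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// With a proxy configured via HTTPS_PROXY, gRPC normally hands the
	// unresolved hostname to the proxy in the CONNECT request. This option
	// asks the client to resolve it locally instead (experimental API).
	cc, err := grpc.NewClient("dns:///example.com:443",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithLocalDNSResolution(),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()
}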
diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
index 73bb4c4ee..fbc1ca356 100644
--- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
+++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
@@ -109,8 +109,9 @@ func (gsb *Balancer) switchTo(builder balancer.Builder) (*balancerWrapper, error
return nil, errBalancerClosed
}
bw := &balancerWrapper{
- builder: builder,
- gsb: gsb,
+ ClientConn: gsb.cc,
+ builder: builder,
+ gsb: gsb,
lastState: balancer.State{
ConnectivityState: connectivity.Connecting,
Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable),
@@ -293,6 +294,7 @@ func (gsb *Balancer) Close() {
// State updates from the wrapped balancer can result in invocation of the
// graceful switch logic.
type balancerWrapper struct {
+ balancer.ClientConn
balancer.Balancer
gsb *Balancer
builder balancer.Builder
@@ -413,7 +415,3 @@ func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver
bw.gsb.mu.Unlock()
bw.gsb.cc.UpdateAddresses(sc, addrs)
}
-
-func (bw *balancerWrapper) Target() string {
- return bw.gsb.cc.Target()
-}
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
index 6e7dd6b77..cc5713fd9 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -49,12 +49,26 @@ var (
// XDSFallbackSupport is the env variable that controls whether support for
// xDS fallback is turned on. If this is unset or is false, only the first
// xDS server in the list of server configs will be used.
- XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false)
+ XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", true)
// NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used
- // instead of the exiting pickfirst implementation. This can be enabled by
+ // instead of the existing pickfirst implementation. This can be disabled by
// setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST"
- // to "true".
- NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", false)
+ // to "false".
+ NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", true)
+
+ // XDSEndpointHashKeyBackwardCompat controls the parsing of the endpoint hash
+ // key from EDS LbEndpoint metadata. Endpoint hash keys can be disabled by
+ // setting "GRPC_XDS_ENDPOINT_HASH_KEY_BACKWARD_COMPAT" to "true". When the
+ // implementation of A76 is stable, we will flip the default value to false
+ // in a subsequent release. A final release will remove this environment
+ // variable, enabling the new behavior unconditionally.
+ XDSEndpointHashKeyBackwardCompat = boolFromEnv("GRPC_XDS_ENDPOINT_HASH_KEY_BACKWARD_COMPAT", true)
+
+ // RingHashSetRequestHashKey is set if the ring hash balancer can get the
+ // request hash header by setting the "requestHashHeader" field, according
+ // to gRFC A76. It can be enabled by setting the environment variable
+ // "GRPC_EXPERIMENTAL_RING_HASH_SET_REQUEST_HASH_KEY" to "true".
+ RingHashSetRequestHashKey = boolFromEnv("GRPC_EXPERIMENTAL_RING_HASH_SET_REQUEST_HASH_KEY", false)
)
func boolFromEnv(envVar string, def bool) bool {
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
index 29f234acb..2eb97f832 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
@@ -53,4 +53,14 @@ var (
// C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI")
+
+ // XDSDualstackEndpointsEnabled is true if gRPC should read the
+ // "additional addresses" in the xDS endpoint resource.
+ XDSDualstackEndpointsEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_DUALSTACK_ENDPOINTS", true)
+
+ // XDSSystemRootCertsEnabled is true when xDS enabled gRPC clients can use
+ // the system's default root certificates for TLS certificate validation.
+ // For more details, see:
+ // https://github.com/grpc/proposal/blob/master/A82-xds-system-root-certs.md.
+ XDSSystemRootCertsEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_SYSTEM_ROOT_CERTS", false)
)
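
Note on the flipped defaults (illustrative, not part of the patch): these features are now opt-out rather than opt-in. The boolFromEnv helper itself is not shown in the hunk; the sketch below is an assumption about its semantics, not the verbatim implementation:

package main

import (
	"fmt"
	"os"
	"strings"
)

// boolFromEnvSketch mirrors the likely semantics: a true default is only
// turned off by an explicit "false", and a false default is only turned on
// by an explicit "true".
func boolFromEnvSketch(envVar string, def bool) bool {
	if def {
		return !strings.EqualFold(os.Getenv(envVar), "false")
	}
	return strings.EqualFold(os.Getenv(envVar), "true")
}

func main() {
	os.Setenv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", "false")
	fmt.Println(boolFromEnvSketch("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", true)) // false
}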
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go
deleted file mode 100644
index 6635f7bca..000000000
--- a/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- *
- * Copyright 2022 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpcsync
-
-import (
- "sync"
-)
-
-// OnceFunc returns a function wrapping f which ensures f is only executed
-// once even if the returned function is executed multiple times.
-func OnceFunc(f func()) func() {
- var once sync.Once
- return func() {
- once.Do(f)
- }
-}
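
For context (not part of the patch): the deleted helper has a drop-in equivalent in the standard library since Go 1.21, sync.OnceFunc, which presumably motivated its removal:

package main

import (
	"fmt"
	"sync"
)

func main() {
	closeOnce := sync.OnceFunc(func() { fmt.Println("closed") })
	closeOnce()
	closeOnce() // no-op; "closed" is printed exactly once
}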
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index 3afc18134..2ce012cda 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -31,6 +31,10 @@ import (
var (
// HealthCheckFunc is used to provide client-side LB channel health checking
HealthCheckFunc HealthChecker
+ // RegisterClientHealthCheckListener is used to provide a listener for
+ // updates from the client-side health checking service. It returns a
+ // function that can be called to stop the health producer.
+ RegisterClientHealthCheckListener any // func(ctx context.Context, sc balancer.SubConn, serviceName string, listener func(balancer.SubConnState)) func()
// BalancerUnregister is exported by package balancer to unregister a balancer.
BalancerUnregister func(name string)
// KeepaliveMinPingTime is the minimum ping interval. This must be 10s by
@@ -60,6 +64,9 @@ var (
// gRPC server. An xDS-enabled server needs to know what type of credentials
// is configured on the underlying gRPC server. This is set by server.go.
GetServerCredentials any // func (*grpc.Server) credentials.TransportCredentials
+ // MetricsRecorderForServer returns the MetricsRecorderList derived from a
+ // server's stats handlers.
+ MetricsRecorderForServer any // func (*grpc.Server) estats.MetricsRecorder
// CanonicalString returns the canonical string of the code defined here:
// https://github.com/grpc/grpc/blob/master/doc/statuscodes.md.
//
@@ -147,6 +154,20 @@ var (
// other features, including the CSDS service.
NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error)
+ // NewXDSResolverWithPoolForTesting creates a new xDS resolver builder
+ // using the provided xDS pool instead of creating a new one using the
+ // bootstrap configuration specified by the supported environment variables.
+ // The resolver.Builder is meant to be used in conjunction with the
+ // grpc.WithResolvers DialOption. The resolver.Builder does not take
+ // ownership of the provided xDS client and it is the responsibility of the
+ // caller to close the client when no longer required.
+ //
+ // Testing Only
+ //
+ // This function should ONLY be used for testing and may not work with some
+ // other features, including the CSDS service.
+ NewXDSResolverWithPoolForTesting any // func(*xdsclient.Pool) (resolver.Builder, error)
+
// NewXDSResolverWithClientForTesting creates a new xDS resolver builder
// using the provided xDS client instead of creating a new one using the
// bootstrap configuration specified by the supported environment variables.
@@ -238,6 +259,13 @@ var (
// SetBufferPoolingThresholdForTesting updates the buffer pooling threshold, for
// testing purposes.
SetBufferPoolingThresholdForTesting any // func(int)
+
+ // TimeAfterFunc is used to create timers. During tests the function is
+ // replaced to track allocated timers and fail the test if a timer isn't
+ // cancelled.
+ TimeAfterFunc = func(d time.Duration, f func()) Timer {
+ return time.AfterFunc(d, f)
+ }
)
// HealthChecker defines the signature of the client-side LB channel health
@@ -273,3 +301,15 @@ const RLSLoadBalancingPolicyName = "rls_experimental"
type EnforceSubConnEmbedding interface {
enforceSubConnEmbedding()
}
+
+// EnforceClientConnEmbedding is used to enforce proper ClientConn implementation
+// embedding.
+type EnforceClientConnEmbedding interface {
+ enforceClientConnEmbedding()
+}
+
+// Timer is an interface to allow injecting different time.Timer implementations
+// during tests.
+type Timer interface {
+ Stop() bool
+}
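
For illustration (not part of the patch): the Timer interface and the TimeAfterFunc hook let tests track or fake timers. A standalone sketch of the same pattern with local names, not the internal package itself:

package main

import (
	"fmt"
	"time"
)

// Timer matches the minimal interface introduced by the patch.
type Timer interface{ Stop() bool }

// timeAfterFunc is the injection point: production code uses time.AfterFunc,
// tests can swap in a wrapper that records every timer it creates.
var timeAfterFunc = func(d time.Duration, f func()) Timer {
	return time.AfterFunc(d, f)
}

func main() {
	var created []Timer
	timeAfterFunc = func(d time.Duration, f func()) Timer {
		t := time.AfterFunc(d, f)
		created = append(created, t)
		return t
	}
	t := timeAfterFunc(time.Hour, func() {})
	t.Stop()
	fmt.Println("timers created:", len(created)) // 1
}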
diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go
index 900bfb716..c4055bc00 100644
--- a/vendor/google.golang.org/grpc/internal/metadata/metadata.go
+++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go
@@ -97,13 +97,11 @@ func hasNotPrintable(msg string) bool {
return false
}
-// ValidatePair validate a key-value pair with the following rules (the pseudo-header will be skipped) :
-//
-// - key must contain one or more characters.
-// - the characters in the key must be contained in [0-9 a-z _ - .].
-// - if the key ends with a "-bin" suffix, no validation of the corresponding value is performed.
-// - the characters in the every value must be printable (in [%x20-%x7E]).
-func ValidatePair(key string, vals ...string) error {
+// ValidateKey validates a key with the following rules (pseudo-headers are
+// skipped):
+// - the key must contain one or more characters.
+// - the characters in the key must be in [0-9 a-z _ - .].
+func ValidateKey(key string) error {
// key should not be empty
if key == "" {
return fmt.Errorf("there is an empty key in the header")
@@ -119,6 +117,20 @@ func ValidatePair(key string, vals ...string) error {
return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key)
}
}
+ return nil
+}
+
+// ValidatePair validates a key-value pair with the following rules
+// (pseudo-headers are skipped):
+// - the key must contain one or more characters.
+// - the characters in the key must be in [0-9 a-z _ - .].
+// - if the key ends with a "-bin" suffix, no validation of the corresponding
+// value is performed.
+// - the characters in every value must be printable (in [%x20-%x7E]).
+func ValidatePair(key string, vals ...string) error {
+ if err := ValidateKey(key); err != nil {
+ return err
+ }
if strings.HasSuffix(key, "-bin") {
return nil
}
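
For illustration (not part of the patch): ValidateKey and ValidatePair live in an internal package, so user code cannot call them directly. The sketch below is a standalone approximation of the documented key rules, for illustration only:

package main

import (
	"fmt"
	"strings"
)

// validKeyChar mirrors the documented rule: keys may contain [0-9 a-z _ - .].
func validKeyChar(r rune) bool {
	return (r >= '0' && r <= '9') || (r >= 'a' && r <= 'z') || r == '_' || r == '-' || r == '.'
}

func validateKeySketch(key string) error {
	if key == "" {
		return fmt.Errorf("there is an empty key in the header")
	}
	if strings.HasPrefix(key, ":") {
		return nil // pseudo-headers are skipped
	}
	for _, r := range key {
		if !validKeyChar(r) {
			return fmt.Errorf("header key %q contains illegal character %q", key, r)
		}
	}
	return nil
}

func main() {
	fmt.Println(validateKeySketch("grpc-trace-bin")) // <nil>
	fmt.Println(validateKeySketch("Bad Key"))        // error: uppercase and space
}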
diff --git a/vendor/google.golang.org/grpc/internal/proxyattributes/proxyattributes.go b/vendor/google.golang.org/grpc/internal/proxyattributes/proxyattributes.go
new file mode 100644
index 000000000..1f61f1a49
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/proxyattributes/proxyattributes.go
@@ -0,0 +1,54 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package proxyattributes contains functions for getting and setting proxy
+// attributes like the CONNECT address and user info.
+package proxyattributes
+
+import (
+ "net/url"
+
+ "google.golang.org/grpc/resolver"
+)
+
+type keyType string
+
+const proxyOptionsKey = keyType("grpc.resolver.delegatingresolver.proxyOptions")
+
+// Options holds the proxy connection details needed during the CONNECT
+// handshake.
+type Options struct {
+ User *url.Userinfo
+ ConnectAddr string
+}
+
+// Set returns a copy of addr with opts set in its attributes.
+func Set(addr resolver.Address, opts Options) resolver.Address {
+ addr.Attributes = addr.Attributes.WithValue(proxyOptionsKey, opts)
+ return addr
+}
+
+// Get returns the Options for the proxy [resolver.Address] and a boolean
+// value representing if the attribute is present or not. The returned data
+// should not be mutated.
+func Get(addr resolver.Address) (Options, bool) {
+ if a := addr.Attributes.Value(proxyOptionsKey); a != nil {
+ return a.(Options), true
+ }
+ return Options{}, false
+}
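
For illustration (not part of the patch): proxyattributes is internal to grpc-go, but it is built on the public attributes mechanism of resolver.Address. The runnable sketch below shows the same pattern using only public API; the key and option types are stand-ins, not the real package:

package main

import (
	"fmt"
	"net/url"

	"google.golang.org/grpc/resolver"
)

type proxyOptionsKey struct{}

// options mirrors the shape of proxyattributes.Options for illustration.
type options struct {
	User        *url.Userinfo
	ConnectAddr string
}

func main() {
	addr := resolver.Address{Addr: "proxy.internal:3128"} // hypothetical proxy address
	addr.Attributes = addr.Attributes.WithValue(proxyOptionsKey{}, options{
		User:        url.UserPassword("user", "pass"),
		ConnectAddr: "backend.example.com:443",
	})

	if v := addr.Attributes.Value(proxyOptionsKey{}); v != nil {
		opts := v.(options)
		fmt.Println("CONNECT to", opts.ConnectAddr, "via", addr.Addr)
	}
}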
diff --git a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go
new file mode 100644
index 000000000..c0e227577
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go
@@ -0,0 +1,412 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package delegatingresolver implements a resolver capable of resolving both
+// target URIs and proxy addresses.
+package delegatingresolver
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+ "sync"
+
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/internal/proxyattributes"
+ "google.golang.org/grpc/internal/transport"
+ "google.golang.org/grpc/internal/transport/networktype"
+ "google.golang.org/grpc/resolver"
+ "google.golang.org/grpc/serviceconfig"
+)
+
+var (
+ logger = grpclog.Component("delegating-resolver")
+ // HTTPSProxyFromEnvironment will be overwritten in the tests
+ HTTPSProxyFromEnvironment = http.ProxyFromEnvironment
+)
+
+// delegatingResolver manages both target URI and proxy address resolution by
+// delegating these tasks to separate child resolvers. Essentially, it acts as
+// an intermediary between the gRPC ClientConn and the child resolvers.
+//
+// It implements the [resolver.Resolver] interface.
+type delegatingResolver struct {
+ target resolver.Target // parsed target URI to be resolved
+ cc resolver.ClientConn // gRPC ClientConn
+ proxyURL *url.URL // proxy URL, derived from proxy environment and target
+
+ // We do not hold both mu and childMu in the same goroutine. Avoid holding
+ // both locks when calling into the child, as the child resolver may
+ // synchronously call back into the channel.
+ mu sync.Mutex // protects all the fields below
+ targetResolverState *resolver.State // state of the target resolver
+ proxyAddrs []resolver.Address // resolved proxy addresses; empty if no proxy is configured
+
+ // childMu serializes calls into child resolvers. It also protects access to
+ // the following fields.
+ childMu sync.Mutex
+ targetResolver resolver.Resolver // resolver for the target URI, based on its scheme
+ proxyResolver resolver.Resolver // resolver for the proxy URI; nil if no proxy is configured
+}
+
+// nopResolver is a resolver that does nothing.
+type nopResolver struct{}
+
+func (nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
+
+func (nopResolver) Close() {}
+
+// proxyURLForTarget determines the proxy URL for the given address based on the
+// environment. It can return the following:
+// - nil URL, nil error: No proxy is configured or the address is excluded
+// using the `NO_PROXY` environment variable or if req.URL.Host is
+// "localhost" (with or without // a port number)
+// - nil URL, non-nil error: An error occurred while retrieving the proxy URL.
+// - non-nil URL, nil error: A proxy is configured, and the proxy URL was
+// retrieved successfully without any errors.
+func proxyURLForTarget(address string) (*url.URL, error) {
+ req := &http.Request{URL: &url.URL{
+ Scheme: "https",
+ Host: address,
+ }}
+ return HTTPSProxyFromEnvironment(req)
+}
+
+// New creates a new delegating resolver that can create up to two child
+// resolvers:
+// - one to resolve the proxy address specified using the supported
+// environment variables. This uses the registered resolver for the "dns"
+// scheme. It is lazily built when a target resolver update contains at least
+// one TCP address.
+// - one to resolve the target URI using the resolver specified by the scheme
+// in the target URI or specified by the user using the WithResolvers dial
+// option. As a special case, if the target URI's scheme is "dns" and a
+// proxy is specified using the supported environment variables, the target
+// URI's path portion is used as the resolved address unless target
+// resolution is enabled using the dial option.
+func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions, targetResolverBuilder resolver.Builder, targetResolutionEnabled bool) (resolver.Resolver, error) {
+ r := &delegatingResolver{
+ target: target,
+ cc: cc,
+ proxyResolver: nopResolver{},
+ targetResolver: nopResolver{},
+ }
+
+ var err error
+ r.proxyURL, err = proxyURLForTarget(target.Endpoint())
+ if err != nil {
+ return nil, fmt.Errorf("delegating_resolver: failed to determine proxy URL for target %s: %v", target, err)
+ }
+
+ // proxy is not configured or proxy address excluded using `NO_PROXY` env
+ // var, so only target resolver is used.
+ if r.proxyURL == nil {
+ return targetResolverBuilder.Build(target, cc, opts)
+ }
+
+ if logger.V(2) {
+ logger.Infof("Proxy URL detected : %s", r.proxyURL)
+ }
+
+ // Resolver updates from one child may trigger calls into the other. Block
+ // updates until the children are initialized.
+ r.childMu.Lock()
+ defer r.childMu.Unlock()
+ // When the scheme is 'dns' and target resolution on client is not enabled,
+ // resolution should be handled by the proxy, not the client. Therefore, we
+ // bypass the target resolver and store the unresolved target address.
+ if target.URL.Scheme == "dns" && !targetResolutionEnabled {
+ r.targetResolverState = &resolver.State{
+ Addresses: []resolver.Address{{Addr: target.Endpoint()}},
+ Endpoints: []resolver.Endpoint{{Addresses: []resolver.Address{{Addr: target.Endpoint()}}}},
+ }
+ r.updateTargetResolverState(*r.targetResolverState)
+ return r, nil
+ }
+ wcc := &wrappingClientConn{
+ stateListener: r.updateTargetResolverState,
+ parent: r,
+ }
+ if r.targetResolver, err = targetResolverBuilder.Build(target, wcc, opts); err != nil {
+ return nil, fmt.Errorf("delegating_resolver: unable to build the resolver for target %s: %v", target, err)
+ }
+ return r, nil
+}
+
+// proxyURIResolver creates a resolver for resolving proxy URIs using the "dns"
+// scheme. It adjusts the proxyURL to conform to the "dns:///" format and builds
+// a resolver with a wrappingClientConn to capture resolved addresses.
+func (r *delegatingResolver) proxyURIResolver(opts resolver.BuildOptions) (resolver.Resolver, error) {
+ proxyBuilder := resolver.Get("dns")
+ if proxyBuilder == nil {
+ panic("delegating_resolver: resolver for proxy not found for scheme dns")
+ }
+ url := *r.proxyURL
+ url.Scheme = "dns"
+ url.Path = "/" + r.proxyURL.Host
+ url.Host = "" // Clear the Host field to conform to the "dns:///" format
+
+ proxyTarget := resolver.Target{URL: url}
+ wcc := &wrappingClientConn{
+ stateListener: r.updateProxyResolverState,
+ parent: r,
+ }
+ return proxyBuilder.Build(proxyTarget, wcc, opts)
+}
+
+func (r *delegatingResolver) ResolveNow(o resolver.ResolveNowOptions) {
+ r.childMu.Lock()
+ defer r.childMu.Unlock()
+ r.targetResolver.ResolveNow(o)
+ r.proxyResolver.ResolveNow(o)
+}
+
+func (r *delegatingResolver) Close() {
+ r.childMu.Lock()
+ defer r.childMu.Unlock()
+ r.targetResolver.Close()
+ r.targetResolver = nil
+
+ r.proxyResolver.Close()
+ r.proxyResolver = nil
+}
+
+func networkTypeFromAddr(addr resolver.Address) string {
+ networkType, ok := networktype.Get(addr)
+ if !ok {
+ networkType, _ = transport.ParseDialTarget(addr.Addr)
+ }
+ return networkType
+}
+
+func isTCPAddressPresent(state *resolver.State) bool {
+ for _, addr := range state.Addresses {
+ if networkType := networkTypeFromAddr(addr); networkType == "tcp" {
+ return true
+ }
+ }
+ for _, endpoint := range state.Endpoints {
+ for _, addr := range endpoint.Addresses {
+ if networktype := networkTypeFromAddr(addr); networktype == "tcp" {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// updateClientConnStateLocked constructs a combined list of addresses by
+// pairing each proxy address with every target address of type TCP. For each
+// pair, it creates a new [resolver.Address] using the proxy address and
+// attaches the corresponding target address and user info as attributes. Target
+// addresses that are not of type TCP are appended to the list as-is. The
+// function returns nil if either resolver has not yet provided an update, and
+// returns the result of ClientConn.UpdateState once both resolvers have
+// provided at least one update.
+func (r *delegatingResolver) updateClientConnStateLocked() error {
+ if r.targetResolverState == nil || r.proxyAddrs == nil {
+ return nil
+ }
+
+ // If multiple resolved proxy addresses are present, we send only the
+ // unresolved proxy host and let net.Dial handle the proxy host name
+ // resolution when creating the transport. Sending all resolved addresses
+ // would increase the number of addresses passed to the ClientConn and
+ // subsequently to load balancing (LB) policies like Round Robin, leading
+ // to additional TCP connections. However, if there's only one resolved
+ // proxy address, we send it directly, as it doesn't affect the address
+ // count returned by the target resolver and the address count sent to the
+ // ClientConn.
+ var proxyAddr resolver.Address
+ if len(r.proxyAddrs) == 1 {
+ proxyAddr = r.proxyAddrs[0]
+ } else {
+ proxyAddr = resolver.Address{Addr: r.proxyURL.Host}
+ }
+ var addresses []resolver.Address
+ for _, targetAddr := range (*r.targetResolverState).Addresses {
+ // Avoid proxy when network is not tcp.
+ if networkType := networkTypeFromAddr(targetAddr); networkType != "tcp" {
+ addresses = append(addresses, targetAddr)
+ continue
+ }
+ addresses = append(addresses, proxyattributes.Set(proxyAddr, proxyattributes.Options{
+ User: r.proxyURL.User,
+ ConnectAddr: targetAddr.Addr,
+ }))
+ }
+
+ // For each target endpoint, construct a new [resolver.Endpoint] that
+ // includes all addresses from all proxy endpoints and the addresses from
+ // that target endpoint, preserving the number of target endpoints.
+ var endpoints []resolver.Endpoint
+ for _, endpt := range (*r.targetResolverState).Endpoints {
+ var addrs []resolver.Address
+ for _, targetAddr := range endpt.Addresses {
+ // Avoid proxy when network is not tcp.
+ if networkType := networkTypeFromAddr(targetAddr); networkType != "tcp" {
+ addrs = append(addrs, targetAddr)
+ continue
+ }
+ for _, proxyAddr := range r.proxyAddrs {
+ addrs = append(addrs, proxyattributes.Set(proxyAddr, proxyattributes.Options{
+ User: r.proxyURL.User,
+ ConnectAddr: targetAddr.Addr,
+ }))
+ }
+ }
+ endpoints = append(endpoints, resolver.Endpoint{Addresses: addrs})
+ }
+ // Use the targetResolverState for its service config and attributes
+ // contents. The state update is only sent after both the target and proxy
+ // resolvers have sent their updates, and curState has been updated with the
+ // combined addresses.
+ curState := *r.targetResolverState
+ curState.Addresses = addresses
+ curState.Endpoints = endpoints
+ return r.cc.UpdateState(curState)
+}
+
+// updateProxyResolverState updates the proxy resolver state by storing proxy
+// addresses and endpoints, marking the resolver as ready, and triggering a
+// state update if both proxy and target resolvers are ready. If the ClientConn
+// returns a non-nil error, it calls `ResolveNow()` on the target resolver. It
+// is a StateListener function of wrappingClientConn passed to the proxy
+// resolver.
+func (r *delegatingResolver) updateProxyResolverState(state resolver.State) error {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ if logger.V(2) {
+ logger.Infof("Addresses received from proxy resolver: %s", state.Addresses)
+ }
+ if len(state.Endpoints) > 0 {
+ // We expect exactly one address per endpoint because the proxy resolver
+ // uses "dns" resolution.
+ r.proxyAddrs = make([]resolver.Address, 0, len(state.Endpoints))
+ for _, endpoint := range state.Endpoints {
+ r.proxyAddrs = append(r.proxyAddrs, endpoint.Addresses...)
+ }
+ } else if state.Addresses != nil {
+ r.proxyAddrs = state.Addresses
+ } else {
+ r.proxyAddrs = []resolver.Address{} // ensure proxyAddrs is non-nil to indicate an update has been received
+ }
+ err := r.updateClientConnStateLocked()
+ // Another possible approach was to block until updates are received from
+ // both resolvers. But this is not used because calling `New()` triggers
+ // `Build()` for the first resolver, which calls `UpdateState()`. And the
+ // second resolver hasn't sent an update yet, so it would cause `New()` to
+ // block indefinitely.
+ if err != nil {
+ go func() {
+ r.childMu.Lock()
+ defer r.childMu.Unlock()
+ if r.targetResolver != nil {
+ r.targetResolver.ResolveNow(resolver.ResolveNowOptions{})
+ }
+ }()
+ }
+ return err
+}
+
+// updateTargetResolverState is the StateListener function provided to the
+// target resolver via wrappingClientConn. It updates the resolver state and
+// marks the target resolver as ready. If the update includes at least one TCP
+// address and the proxy resolver has not yet been constructed, it initializes
+// the proxy resolver. A combined state update is triggered once both resolvers
+// are ready. If all addresses are non-TCP, it proceeds without waiting for the
+// proxy resolver. If ClientConn.UpdateState returns a non-nil error,
+// ResolveNow() is called on the proxy resolver.
+func (r *delegatingResolver) updateTargetResolverState(state resolver.State) error {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ if logger.V(2) {
+ logger.Infof("Addresses received from target resolver: %v", state.Addresses)
+ }
+ r.targetResolverState = &state
+ // If none of the addresses returned by the target resolver are TCP
+ // addresses, do not wait for a proxy update.
+ if !isTCPAddressPresent(r.targetResolverState) {
+ return r.cc.UpdateState(*r.targetResolverState)
+ }
+
+ // The proxy resolver may be rebuilt multiple times, specifically each time
+ // the target resolver sends an update, even if the target resolver is built
+ // successfully but building the proxy resolver fails.
+ if len(r.proxyAddrs) == 0 {
+ go func() {
+ r.childMu.Lock()
+ defer r.childMu.Unlock()
+ if _, ok := r.proxyResolver.(nopResolver); !ok {
+ return
+ }
+ proxyResolver, err := r.proxyURIResolver(resolver.BuildOptions{})
+ if err != nil {
+ r.cc.ReportError(fmt.Errorf("delegating_resolver: unable to build the proxy resolver: %v", err))
+ return
+ }
+ r.proxyResolver = proxyResolver
+ }()
+ }
+
+ err := r.updateClientConnStateLocked()
+ if err != nil {
+ go func() {
+ r.childMu.Lock()
+ defer r.childMu.Unlock()
+ if r.proxyResolver != nil {
+ r.proxyResolver.ResolveNow(resolver.ResolveNowOptions{})
+ }
+ }()
+ }
+ return nil
+}
+
+// wrappingClientConn serves as an intermediary between the parent ClientConn
+// and the child resolvers created here. It implements the resolver.ClientConn
+// interface and is passed in that capacity to the child resolvers.
+type wrappingClientConn struct {
+ // Callback to deliver resolver state updates
+ stateListener func(state resolver.State) error
+ parent *delegatingResolver
+}
+
+// UpdateState receives resolver state updates and forwards them to the
+// appropriate listener function (either for the proxy or target resolver).
+func (wcc *wrappingClientConn) UpdateState(state resolver.State) error {
+ return wcc.stateListener(state)
+}
+
+// ReportError intercepts errors from the child resolvers and passes them to
+// ClientConn.
+func (wcc *wrappingClientConn) ReportError(err error) {
+ wcc.parent.cc.ReportError(err)
+}
+
+// NewAddress intercepts the new resolved address from the child resolvers and
+// passes them to ClientConn.
+func (wcc *wrappingClientConn) NewAddress(addrs []resolver.Address) {
+ wcc.UpdateState(resolver.State{Addresses: addrs})
+}
+
+// ParseServiceConfig parses the provided service config and returns an object
+// that provides the parsed config.
+func (wcc *wrappingClientConn) ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult {
+ return wcc.parent.cc.ParseServiceConfig(serviceConfigJSON)
+}
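
For illustration (not part of the patch): proxyURLForTarget defers to net/http's standard proxy-from-environment logic. A standalone sketch of how HTTPS_PROXY and NO_PROXY drive that decision; the hosts and proxy values below are hypothetical:

package main

import (
	"fmt"
	"net/http"
	"net/url"
	"os"
)

func main() {
	os.Setenv("HTTPS_PROXY", "http://proxy.internal:3128")
	os.Setenv("NO_PROXY", "localhost,.corp.example.com")

	for _, host := range []string{"api.example.com:443", "svc.corp.example.com:443"} {
		req := &http.Request{URL: &url.URL{Scheme: "https", Host: host}}
		u, err := http.ProxyFromEnvironment(req)
		fmt.Println(host, "->", u, err)
	}
	// api.example.com:443 -> http://proxy.internal:3128 <nil>
	// svc.corp.example.com:443 -> <nil> <nil>   (excluded by NO_PROXY)
}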
diff --git a/vendor/google.golang.org/grpc/internal/transport/client_stream.go b/vendor/google.golang.org/grpc/internal/transport/client_stream.go
index 8ed347c54..ccc0e017e 100644
--- a/vendor/google.golang.org/grpc/internal/transport/client_stream.go
+++ b/vendor/google.golang.org/grpc/internal/transport/client_stream.go
@@ -59,7 +59,7 @@ func (s *ClientStream) Read(n int) (mem.BufferSlice, error) {
return b, err
}
-// Close closes the stream and popagates err to any readers.
+// Close closes the stream and propagates err to any readers.
func (s *ClientStream) Close(err error) {
var (
rst bool
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
index d9305a65d..3dea23573 100644
--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -498,5 +498,5 @@ func mapRecvMsgError(err error) error {
if strings.Contains(err.Error(), "body closed by handler") {
return status.Error(codes.Canceled, err.Error())
}
- return connectionErrorf(true, err, err.Error())
+ return connectionErrorf(true, err, "%s", err.Error())
}
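
For context (not part of the patch): switching to a "%s" format avoids treating arbitrary error text as a format string, which would mangle any %-verbs it happens to contain. A standalone illustration:

package main

import (
	"errors"
	"fmt"
)

func main() {
	err := errors.New("unexpected token %q in body")

	fmt.Println(fmt.Errorf(err.Error()))       // unexpected token %!q(MISSING) in body
	fmt.Println(fmt.Errorf("%s", err.Error())) // text preserved verbatim
}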
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index f323ab7f4..171e690a3 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -43,6 +43,7 @@ import (
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/grpcutil"
imetadata "google.golang.org/grpc/internal/metadata"
+ "google.golang.org/grpc/internal/proxyattributes"
istatus "google.golang.org/grpc/internal/status"
isyscall "google.golang.org/grpc/internal/syscall"
"google.golang.org/grpc/internal/transport/networktype"
@@ -153,7 +154,7 @@ type http2Client struct {
logger *grpclog.PrefixLogger
}
-func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) {
+func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, grpcUA string) (net.Conn, error) {
address := addr.Addr
networkType, ok := networktype.Get(addr)
if fn != nil {
@@ -175,10 +176,10 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error
return fn(ctx, address)
}
if !ok {
- networkType, address = parseDialTarget(address)
+ networkType, address = ParseDialTarget(address)
}
- if networkType == "tcp" && useProxy {
- return proxyDial(ctx, address, grpcUA)
+ if opts, present := proxyattributes.Get(addr); present {
+ return proxyDial(ctx, addr, grpcUA, opts)
}
return internal.NetDialerWithTCPKeepalive().DialContext(ctx, networkType, address)
}
@@ -217,7 +218,7 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
// address specific arbitrary data to reach custom dialers and credential handshakers.
connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes})
- conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent)
+ conn, err := dial(connectCtx, opts.Dialer, addr, opts.UserAgent)
if err != nil {
if opts.FailOnNonTempDialError {
return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err)
@@ -1241,7 +1242,8 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
statusCode = codes.DeadlineExceeded
}
}
- t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false)
+ st := status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode)
+ t.closeStream(s, st.Err(), false, http2.ErrCodeNo, st, nil, false)
}
func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) {
@@ -1389,8 +1391,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error {
// the caller.
func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
t.goAwayReason = GoAwayNoReason
- switch f.ErrCode {
- case http2.ErrCodeEnhanceYourCalm:
+ if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
if string(f.DebugData()) == "too_many_pings" {
t.goAwayReason = GoAwayTooManyPings
}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index 0055fddd7..7e53eb173 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -35,6 +35,7 @@ import (
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
+ "google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/internal/pretty"
@@ -564,7 +565,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
t.logger.Infof("Aborting the stream early: %v", errMsg)
}
t.controlBuf.put(&earlyAbortStream{
- httpStatus: 405,
+ httpStatus: http.StatusMethodNotAllowed,
streamID: streamID,
contentSubtype: s.contentSubtype,
status: status.New(codes.Internal, errMsg),
@@ -585,7 +586,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
stat = status.New(codes.PermissionDenied, err.Error())
}
t.controlBuf.put(&earlyAbortStream{
- httpStatus: 200,
+ httpStatus: http.StatusOK,
streamID: s.id,
contentSubtype: s.contentSubtype,
status: stat,
@@ -598,6 +599,22 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
if len(t.activeStreams) == 1 {
t.idle = time.Time{}
}
+ // Start a timer to close the stream on reaching the deadline.
+ if timeoutSet {
+ // We need to wait for s.cancel to be updated before calling
+ // t.closeStream to avoid data races.
+ cancelUpdated := make(chan struct{})
+ timer := internal.TimeAfterFunc(timeout, func() {
+ <-cancelUpdated
+ t.closeStream(s, true, http2.ErrCodeCancel, false)
+ })
+ oldCancel := s.cancel
+ s.cancel = func() {
+ oldCancel()
+ timer.Stop()
+ }
+ close(cancelUpdated)
+ }
t.mu.Unlock()
if channelz.IsOn() {
t.channelz.SocketMetrics.StreamsStarted.Add(1)
@@ -1274,7 +1291,6 @@ func (t *http2Server) Close(err error) {
// deleteStream deletes the stream s from transport's active streams.
func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) {
-
t.mu.Lock()
if _, ok := t.activeStreams[s.id]; ok {
delete(t.activeStreams, s.id)
@@ -1324,7 +1340,10 @@ func (t *http2Server) closeStream(s *ServerStream, rst bool, rstCode http2.ErrCo
// called to interrupt the potential blocking on other goroutines.
s.cancel()
- s.swapState(streamDone)
+ oldState := s.swapState(streamDone)
+ if oldState == streamDone {
+ return
+ }
t.deleteStream(s, eosReceived)
t.controlBuf.put(&cleanupStream{
diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go
index 3613d7b64..f997f9fdb 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http_util.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go
@@ -439,8 +439,8 @@ func getWriteBufferPool(size int) *sync.Pool {
return pool
}
-// parseDialTarget returns the network and address to pass to dialer.
-func parseDialTarget(target string) (string, string) {
+// ParseDialTarget returns the network and address to pass to dialer.
+func ParseDialTarget(target string) (string, string) {
net := "tcp"
m1 := strings.Index(target, ":")
m2 := strings.Index(target, ":/")
diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go
index 54b224436..d77384595 100644
--- a/vendor/google.golang.org/grpc/internal/transport/proxy.go
+++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go
@@ -30,34 +30,16 @@ import (
"net/url"
"google.golang.org/grpc/internal"
+ "google.golang.org/grpc/internal/proxyattributes"
+ "google.golang.org/grpc/resolver"
)
const proxyAuthHeaderKey = "Proxy-Authorization"
-var (
- // The following variable will be overwritten in the tests.
- httpProxyFromEnvironment = http.ProxyFromEnvironment
-)
-
-func mapAddress(address string) (*url.URL, error) {
- req := &http.Request{
- URL: &url.URL{
- Scheme: "https",
- Host: address,
- },
- }
- url, err := httpProxyFromEnvironment(req)
- if err != nil {
- return nil, err
- }
- return url, nil
-}
-
// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader.
-// It's possible that this reader reads more than what's need for the response and stores
-// those bytes in the buffer.
-// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the
-// bytes in the buffer.
+// It's possible that this reader reads more than what's need for the response
+// and stores those bytes in the buffer. bufConn wraps the original net.Conn
+// and the bufio.Reader to make sure we don't lose the bytes in the buffer.
type bufConn struct {
net.Conn
r io.Reader
@@ -72,7 +54,7 @@ func basicAuth(username, password string) string {
return base64.StdEncoding.EncodeToString([]byte(auth))
}
-func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL, grpcUA string) (_ net.Conn, err error) {
+func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, grpcUA string, opts proxyattributes.Options) (_ net.Conn, err error) {
defer func() {
if err != nil {
conn.Close()
@@ -81,15 +63,14 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri
req := &http.Request{
Method: http.MethodConnect,
- URL: &url.URL{Host: backendAddr},
+ URL: &url.URL{Host: opts.ConnectAddr},
Header: map[string][]string{"User-Agent": {grpcUA}},
}
- if t := proxyURL.User; t != nil {
- u := t.Username()
- p, _ := t.Password()
+ if user := opts.User; user != nil {
+ u := user.Username()
+ p, _ := user.Password()
req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p))
}
-
if err := sendHTTPRequest(ctx, req, conn); err != nil {
return nil, fmt.Errorf("failed to write the HTTP request: %v", err)
}
@@ -117,28 +98,13 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri
return conn, nil
}
-// proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy
-// is necessary, dials, does the HTTP CONNECT handshake, and returns the
-// connection.
-func proxyDial(ctx context.Context, addr string, grpcUA string) (net.Conn, error) {
- newAddr := addr
- proxyURL, err := mapAddress(addr)
- if err != nil {
- return nil, err
- }
- if proxyURL != nil {
- newAddr = proxyURL.Host
- }
-
- conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", newAddr)
+// proxyDial establishes a TCP connection to the specified address and performs an HTTP CONNECT handshake.
+func proxyDial(ctx context.Context, addr resolver.Address, grpcUA string, opts proxyattributes.Options) (net.Conn, error) {
+ conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", addr.Addr)
if err != nil {
return nil, err
}
- if proxyURL == nil {
- // proxy is disabled if proxyURL is nil.
- return conn, err
- }
- return doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA)
+ return doHTTPConnectHandshake(ctx, conn, grpcUA, opts)
}
func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
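
proxyDial no longer consults the environment itself: mapAddress and httpProxyFromEnvironment are removed, and the address to CONNECT to now arrives pre-computed in proxyattributes.Options. For comparison, the sketch below is roughly the lookup that used to live here, expressed with the standard library's http.ProxyFromEnvironment; the helper name is made up for illustration.

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

// proxyURLForAddr shows the lookup mapAddress used to perform: ask the
// standard library which proxy (if any) the environment configures for a
// given backend address. In grpc 1.72 this decision is made before proxyDial
// is called, not inside it.
func proxyURLForAddr(backendAddr string) (*url.URL, error) {
	req := &http.Request{URL: &url.URL{Scheme: "https", Host: backendAddr}}
	return http.ProxyFromEnvironment(req)
}

func main() {
	u, err := proxyURLForAddr("example.com:443")
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	if u == nil {
		fmt.Println("no proxy configured; dial the backend directly")
		return
	}
	fmt.Println("dial proxy at", u.Host, "then CONNECT to the backend")
}
```
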
diff --git a/vendor/google.golang.org/grpc/internal/transport/server_stream.go b/vendor/google.golang.org/grpc/internal/transport/server_stream.go
index a22a90151..cf8da0b52 100644
--- a/vendor/google.golang.org/grpc/internal/transport/server_stream.go
+++ b/vendor/google.golang.org/grpc/internal/transport/server_stream.go
@@ -35,8 +35,10 @@ type ServerStream struct {
*Stream // Embed for common stream functionality.
st internalServerTransport
- ctxDone <-chan struct{} // closed at the end of stream. Cache of ctx.Done() (for performance)
- cancel context.CancelFunc // invoked at the end of stream to cancel ctx.
+ ctxDone <-chan struct{} // closed at the end of stream. Cache of ctx.Done() (for performance)
+ // cancel is invoked at the end of stream to cancel ctx. It also stops the
+ // timer for monitoring the rpc deadline if configured.
+ cancel func()
// Holds compressor names passed in grpc-accept-encoding metadata from the
// client.
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
index 2859b8775..af4a4aeab 100644
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -502,8 +502,6 @@ type ConnectOptions struct {
ChannelzParent *channelz.SubChannel
// MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received.
MaxHeaderListSize *uint32
- // UseProxy specifies if a proxy should be used.
- UseProxy bool
// The mem.BufferPool to use when reading/writing to the wire.
BufferPool mem.BufferPool
}
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
index bdaa2130e..a2d2a798d 100644
--- a/vendor/google.golang.org/grpc/picker_wrapper.go
+++ b/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -123,7 +123,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
if lastPickErr != nil {
errStr = "latest balancer error: " + lastPickErr.Error()
} else {
- errStr = fmt.Sprintf("received context error while waiting for new LB policy update: %s", ctx.Err().Error())
+ errStr = fmt.Sprintf("%v while waiting for connections to become ready", ctx.Err())
}
switch ctx.Err() {
case context.DeadlineExceeded:
diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go
index ada5b9bb7..c3c15ac96 100644
--- a/vendor/google.golang.org/grpc/resolver/map.go
+++ b/vendor/google.golang.org/grpc/resolver/map.go
@@ -18,16 +18,28 @@
package resolver
-type addressMapEntry struct {
+import (
+ "encoding/base64"
+ "sort"
+ "strings"
+)
+
+type addressMapEntry[T any] struct {
addr Address
- value any
+ value T
}
-// AddressMap is a map of addresses to arbitrary values taking into account
+// AddressMap is an AddressMapV2[any]. It will be deleted in an upcoming
+// release of grpc-go.
+//
+// Deprecated: use the generic AddressMapV2 type instead.
+type AddressMap = AddressMapV2[any]
+
+// AddressMapV2 is a map of addresses to arbitrary values taking into account
// Attributes. BalancerAttributes are ignored, as are Metadata and Type.
// Multiple accesses may not be performed concurrently. Must be created via
// NewAddressMap; do not construct directly.
-type AddressMap struct {
+type AddressMapV2[T any] struct {
// The underlying map is keyed by an Address with fields that we don't care
// about being set to their zero values. The only fields that we care about
// are `Addr`, `ServerName` and `Attributes`. Since we need to be able to
@@ -41,23 +53,30 @@ type AddressMap struct {
// The value type of the map contains a slice of addresses which match the key
// in their `Addr` and `ServerName` fields and contain the corresponding value
// associated with them.
- m map[Address]addressMapEntryList
+ m map[Address]addressMapEntryList[T]
}
func toMapKey(addr *Address) Address {
return Address{Addr: addr.Addr, ServerName: addr.ServerName}
}
-type addressMapEntryList []*addressMapEntry
+type addressMapEntryList[T any] []*addressMapEntry[T]
-// NewAddressMap creates a new AddressMap.
+// NewAddressMap creates a new AddressMapV2[any].
+//
+// Deprecated: use the generic NewAddressMapV2 constructor instead.
func NewAddressMap() *AddressMap {
- return &AddressMap{m: make(map[Address]addressMapEntryList)}
+ return NewAddressMapV2[any]()
+}
+
+// NewAddressMapV2 creates a new AddressMapV2.
+func NewAddressMapV2[T any]() *AddressMapV2[T] {
+ return &AddressMapV2[T]{m: make(map[Address]addressMapEntryList[T])}
}
// find returns the index of addr in the addressMapEntry slice, or -1 if not
// present.
-func (l addressMapEntryList) find(addr Address) int {
+func (l addressMapEntryList[T]) find(addr Address) int {
for i, entry := range l {
// Attributes are the only thing to match on here, since `Addr` and
// `ServerName` are already equal.
@@ -69,28 +88,28 @@ func (l addressMapEntryList) find(addr Address) int {
}
// Get returns the value for the address in the map, if present.
-func (a *AddressMap) Get(addr Address) (value any, ok bool) {
+func (a *AddressMapV2[T]) Get(addr Address) (value T, ok bool) {
addrKey := toMapKey(&addr)
entryList := a.m[addrKey]
if entry := entryList.find(addr); entry != -1 {
return entryList[entry].value, true
}
- return nil, false
+ return value, false
}
// Set updates or adds the value to the address in the map.
-func (a *AddressMap) Set(addr Address, value any) {
+func (a *AddressMapV2[T]) Set(addr Address, value T) {
addrKey := toMapKey(&addr)
entryList := a.m[addrKey]
if entry := entryList.find(addr); entry != -1 {
entryList[entry].value = value
return
}
- a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value})
+ a.m[addrKey] = append(entryList, &addressMapEntry[T]{addr: addr, value: value})
}
// Delete removes addr from the map.
-func (a *AddressMap) Delete(addr Address) {
+func (a *AddressMapV2[T]) Delete(addr Address) {
addrKey := toMapKey(&addr)
entryList := a.m[addrKey]
entry := entryList.find(addr)
@@ -107,7 +126,7 @@ func (a *AddressMap) Delete(addr Address) {
}
// Len returns the number of entries in the map.
-func (a *AddressMap) Len() int {
+func (a *AddressMapV2[T]) Len() int {
ret := 0
for _, entryList := range a.m {
ret += len(entryList)
@@ -116,7 +135,7 @@ func (a *AddressMap) Len() int {
}
// Keys returns a slice of all current map keys.
-func (a *AddressMap) Keys() []Address {
+func (a *AddressMapV2[T]) Keys() []Address {
ret := make([]Address, 0, a.Len())
for _, entryList := range a.m {
for _, entry := range entryList {
@@ -127,8 +146,8 @@ func (a *AddressMap) Keys() []Address {
}
// Values returns a slice of all current map values.
-func (a *AddressMap) Values() []any {
- ret := make([]any, 0, a.Len())
+func (a *AddressMapV2[T]) Values() []T {
+ ret := make([]T, 0, a.Len())
for _, entryList := range a.m {
for _, entry := range entryList {
ret = append(ret, entry.value)
@@ -137,70 +156,65 @@ func (a *AddressMap) Values() []any {
return ret
}
-type endpointNode struct {
- addrs map[string]struct{}
-}
-
-// Equal returns whether the unordered set of addrs are the same between the
-// endpoint nodes.
-func (en *endpointNode) Equal(en2 *endpointNode) bool {
- if len(en.addrs) != len(en2.addrs) {
- return false
- }
- for addr := range en.addrs {
- if _, ok := en2.addrs[addr]; !ok {
- return false
- }
- }
- return true
-}
-
-func toEndpointNode(endpoint Endpoint) endpointNode {
- en := make(map[string]struct{})
- for _, addr := range endpoint.Addresses {
- en[addr.Addr] = struct{}{}
- }
- return endpointNode{
- addrs: en,
- }
-}
+type endpointMapKey string
// EndpointMap is a map of endpoints to arbitrary values keyed on only the
// unordered set of address strings within an endpoint. This map is not thread
// safe, thus it is unsafe to access concurrently. Must be created via
// NewEndpointMap; do not construct directly.
-type EndpointMap struct {
- endpoints map[*endpointNode]any
+type EndpointMap[T any] struct {
+ endpoints map[endpointMapKey]endpointData[T]
+}
+
+type endpointData[T any] struct {
+ // decodedKey stores the original key to avoid decoding when iterating on
+ // EndpointMap keys.
+ decodedKey Endpoint
+ value T
}
// NewEndpointMap creates a new EndpointMap.
-func NewEndpointMap() *EndpointMap {
- return &EndpointMap{
- endpoints: make(map[*endpointNode]any),
+func NewEndpointMap[T any]() *EndpointMap[T] {
+ return &EndpointMap[T]{
+ endpoints: make(map[endpointMapKey]endpointData[T]),
}
}
+// encodeEndpoint returns a string that uniquely identifies the unordered set of
+// addresses within an endpoint.
+func encodeEndpoint(e Endpoint) endpointMapKey {
+ addrs := make([]string, 0, len(e.Addresses))
+ // base64 encoding the address strings restricts the characters present
+ // within the strings. This allows us to use a delimiter without the need of
+ // escape characters.
+ for _, addr := range e.Addresses {
+ addrs = append(addrs, base64.StdEncoding.EncodeToString([]byte(addr.Addr)))
+ }
+ sort.Strings(addrs)
+ // " " should not appear in base64 encoded strings.
+ return endpointMapKey(strings.Join(addrs, " "))
+}
+
// Get returns the value for the address in the map, if present.
-func (em *EndpointMap) Get(e Endpoint) (value any, ok bool) {
- en := toEndpointNode(e)
- if endpoint := em.find(en); endpoint != nil {
- return em.endpoints[endpoint], true
+func (em *EndpointMap[T]) Get(e Endpoint) (value T, ok bool) {
+ val, found := em.endpoints[encodeEndpoint(e)]
+ if found {
+ return val.value, true
}
- return nil, false
+ return value, false
}
// Set updates or adds the value to the address in the map.
-func (em *EndpointMap) Set(e Endpoint, value any) {
- en := toEndpointNode(e)
- if endpoint := em.find(en); endpoint != nil {
- em.endpoints[endpoint] = value
- return
+func (em *EndpointMap[T]) Set(e Endpoint, value T) {
+ en := encodeEndpoint(e)
+ em.endpoints[en] = endpointData[T]{
+ decodedKey: Endpoint{Addresses: e.Addresses},
+ value: value,
}
- em.endpoints[&en] = value
}
// Len returns the number of entries in the map.
-func (em *EndpointMap) Len() int {
+func (em *EndpointMap[T]) Len() int {
return len(em.endpoints)
}
@@ -209,43 +223,25 @@ func (em *EndpointMap) Len() int {
// the unordered set of addresses. Thus, endpoint information returned is not
// the full endpoint data (drops duplicated addresses and attributes) but can be
// used for EndpointMap accesses.
-func (em *EndpointMap) Keys() []Endpoint {
+func (em *EndpointMap[T]) Keys() []Endpoint {
ret := make([]Endpoint, 0, len(em.endpoints))
- for en := range em.endpoints {
- var endpoint Endpoint
- for addr := range en.addrs {
- endpoint.Addresses = append(endpoint.Addresses, Address{Addr: addr})
- }
- ret = append(ret, endpoint)
+ for _, en := range em.endpoints {
+ ret = append(ret, en.decodedKey)
}
return ret
}
// Values returns a slice of all current map values.
-func (em *EndpointMap) Values() []any {
- ret := make([]any, 0, len(em.endpoints))
+func (em *EndpointMap[T]) Values() []T {
+ ret := make([]T, 0, len(em.endpoints))
for _, val := range em.endpoints {
- ret = append(ret, val)
+ ret = append(ret, val.value)
}
return ret
}
-// find returns a pointer to the endpoint node in em if the endpoint node is
-// already present. If not found, nil is returned. The comparisons are done on
-// the unordered set of addresses within an endpoint.
-func (em EndpointMap) find(e endpointNode) *endpointNode {
- for endpoint := range em.endpoints {
- if e.Equal(endpoint) {
- return endpoint
- }
- }
- return nil
-}
-
// Delete removes the specified endpoint from the map.
-func (em *EndpointMap) Delete(e Endpoint) {
- en := toEndpointNode(e)
- if entry := em.find(en); entry != nil {
- delete(em.endpoints, entry)
- }
+func (em *EndpointMap[T]) Delete(e Endpoint) {
+ en := encodeEndpoint(e)
+ delete(em.endpoints, en)
}
diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go
index 8eb1cf3bc..b84ef26d4 100644
--- a/vendor/google.golang.org/grpc/resolver/resolver.go
+++ b/vendor/google.golang.org/grpc/resolver/resolver.go
@@ -30,6 +30,7 @@ import (
"google.golang.org/grpc/attributes"
"google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/experimental/stats"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/serviceconfig"
)
@@ -175,6 +176,8 @@ type BuildOptions struct {
// Authority is the effective authority of the clientconn for which the
// resolver is built.
Authority string
+ // MetricsRecorder is the metrics recorder to do recording.
+ MetricsRecorder stats.MetricsRecorder
}
// An Endpoint is one network endpoint, or server, which may have multiple
diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go
index 23bb3fb25..80e16a327 100644
--- a/vendor/google.golang.org/grpc/resolver_wrapper.go
+++ b/vendor/google.golang.org/grpc/resolver_wrapper.go
@@ -26,6 +26,7 @@ import (
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/pretty"
+ "google.golang.org/grpc/internal/resolver/delegatingresolver"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/serviceconfig"
)
@@ -76,9 +77,19 @@ func (ccr *ccResolverWrapper) start() error {
CredsBundle: ccr.cc.dopts.copts.CredsBundle,
Dialer: ccr.cc.dopts.copts.Dialer,
Authority: ccr.cc.authority,
+ MetricsRecorder: ccr.cc.metricsRecorderList,
}
var err error
- ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts)
+ // The delegating resolver is used unless:
+ // - A custom dialer is provided via WithContextDialer dialoption or
+ // - Proxy usage is disabled through WithNoProxy dialoption.
+ // In these cases, the resolver is built based on the scheme of target,
+ // using the appropriate resolver builder.
+ if ccr.cc.dopts.copts.Dialer != nil || !ccr.cc.dopts.useProxy {
+ ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts)
+ } else {
+ ccr.resolver, err = delegatingresolver.New(ccr.cc.parsedTarget, ccr, opts, ccr.cc.resolverBuilder, ccr.cc.dopts.enableLocalDNSResolution)
+ }
errCh <- err
})
return <-errCh
@@ -123,12 +134,7 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error {
return nil
}
if s.Endpoints == nil {
- s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses))
- for _, a := range s.Addresses {
- ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes}
- ep.Addresses[0].BalancerAttributes = nil
- s.Endpoints = append(s.Endpoints, ep)
- }
+ s.Endpoints = addressesToEndpoints(s.Addresses)
}
ccr.addChannelzTraceEvent(s)
ccr.curState = s
@@ -161,7 +167,11 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
ccr.cc.mu.Unlock()
return
}
- s := resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}
+ s := resolver.State{
+ Addresses: addrs,
+ ServiceConfig: ccr.curState.ServiceConfig,
+ Endpoints: addressesToEndpoints(addrs),
+ }
ccr.addChannelzTraceEvent(s)
ccr.curState = s
ccr.mu.Unlock()
@@ -199,3 +209,13 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
}
channelz.Infof(logger, ccr.cc.channelz, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; "))
}
+
+func addressesToEndpoints(addrs []resolver.Address) []resolver.Endpoint {
+ endpoints := make([]resolver.Endpoint, 0, len(addrs))
+ for _, a := range addrs {
+ ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes}
+ ep.Addresses[0].BalancerAttributes = nil
+ endpoints = append(endpoints, ep)
+ }
+ return endpoints
+}
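
The resolver wrapper now builds the proxy-aware delegating resolver by default, falling back to the target's own resolver builder only when a custom dialer (WithContextDialer) is set or proxying is disabled (WithNoProxy); both UpdateState and NewAddress synthesize single-address Endpoints through the shared addressesToEndpoints helper. A hedged sketch of how a caller opts out of the delegating resolver; the target and credentials are placeholders:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Either WithNoProxy or WithContextDialer makes the channel build the
	// target's resolver directly; otherwise the delegating (proxy-aware)
	// resolver from internal/resolver/delegatingresolver is used.
	conn, err := grpc.NewClient(
		"dns:///service.example.com:443",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithNoProxy(),
	)
	if err != nil {
		log.Fatalf("grpc.NewClient: %v", err)
	}
	defer conn.Close()
}
```
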
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
index 9fac2b08b..ad20e9dff 100644
--- a/vendor/google.golang.org/grpc/rpc_util.go
+++ b/vendor/google.golang.org/grpc/rpc_util.go
@@ -151,7 +151,7 @@ func (d *gzipDecompressor) Type() string {
// callInfo contains all related configuration and information about an RPC.
type callInfo struct {
- compressorType string
+ compressorName string
failFast bool
maxReceiveMessageSize *int
maxSendMessageSize *int
@@ -222,7 +222,7 @@ type HeaderCallOption struct {
func (o HeaderCallOption) before(*callInfo) error { return nil }
func (o HeaderCallOption) after(_ *callInfo, attempt *csAttempt) {
- *o.HeaderAddr, _ = attempt.s.Header()
+ *o.HeaderAddr, _ = attempt.transportStream.Header()
}
// Trailer returns a CallOptions that retrieves the trailer metadata
@@ -244,7 +244,7 @@ type TrailerCallOption struct {
func (o TrailerCallOption) before(*callInfo) error { return nil }
func (o TrailerCallOption) after(_ *callInfo, attempt *csAttempt) {
- *o.TrailerAddr = attempt.s.Trailer()
+ *o.TrailerAddr = attempt.transportStream.Trailer()
}
// Peer returns a CallOption that retrieves peer information for a unary RPC.
@@ -266,7 +266,7 @@ type PeerCallOption struct {
func (o PeerCallOption) before(*callInfo) error { return nil }
func (o PeerCallOption) after(_ *callInfo, attempt *csAttempt) {
- if x, ok := peer.FromContext(attempt.s.Context()); ok {
+ if x, ok := peer.FromContext(attempt.transportStream.Context()); ok {
*o.PeerAddr = *x
}
}
@@ -435,7 +435,7 @@ type CompressorCallOption struct {
}
func (o CompressorCallOption) before(c *callInfo) error {
- c.compressorType = o.CompressorType
+ c.compressorName = o.CompressorType
return nil
}
func (o CompressorCallOption) after(*callInfo, *csAttempt) {}
@@ -692,9 +692,9 @@ func encode(c baseCodec, msg any) (mem.BufferSlice, error) {
if err != nil {
return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
}
- if uint(b.Len()) > math.MaxUint32 {
+ if bufSize := uint(b.Len()); bufSize > math.MaxUint32 {
b.Free()
- return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b))
+ return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", bufSize)
}
return b, nil
}
@@ -828,30 +828,13 @@ func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveM
return nil, st.Err()
}
- var size int
if pf.isCompressed() {
defer compressed.Free()
-
// To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor,
// use this decompressor as the default.
- if dc != nil {
- var uncompressedBuf []byte
- uncompressedBuf, err = dc.Do(compressed.Reader())
- if err == nil {
- out = mem.BufferSlice{mem.SliceBuffer(uncompressedBuf)}
- }
- size = len(uncompressedBuf)
- } else {
- out, size, err = decompress(compressor, compressed, maxReceiveMessageSize, p.bufferPool)
- }
+ out, err = decompress(compressor, compressed, dc, maxReceiveMessageSize, p.bufferPool)
if err != nil {
- return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err)
- }
- if size > maxReceiveMessageSize {
- out.Free()
- // TODO: Revisit the error code. Currently keep it consistent with java
- // implementation.
- return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize)
+ return nil, err
}
} else {
out = compressed
@@ -866,20 +849,46 @@ func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveM
return out, nil
}
-// Using compressor, decompress d, returning data and size.
-// Optionally, if data will be over maxReceiveMessageSize, just return the size.
-func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, int, error) {
- dcReader, err := compressor.Decompress(d.Reader())
- if err != nil {
- return nil, 0, err
+// decompress processes the given data by decompressing it using either a custom decompressor or a standard compressor.
+// If a custom decompressor is provided, it takes precedence. The function validates that the decompressed data
+// does not exceed the specified maximum size and returns an error if this limit is exceeded.
+// On success, it returns the decompressed data. Otherwise, it returns an error if decompression fails or the data exceeds the size limit.
+func decompress(compressor encoding.Compressor, d mem.BufferSlice, dc Decompressor, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, error) {
+ if dc != nil {
+ uncompressed, err := dc.Do(d.Reader())
+ if err != nil {
+ return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err)
+ }
+ if len(uncompressed) > maxReceiveMessageSize {
+ return nil, status.Errorf(codes.ResourceExhausted, "grpc: message after decompression larger than max (%d vs. %d)", len(uncompressed), maxReceiveMessageSize)
+ }
+ return mem.BufferSlice{mem.SliceBuffer(uncompressed)}, nil
}
+ if compressor != nil {
+ dcReader, err := compressor.Decompress(d.Reader())
+ if err != nil {
+ return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the message: %v", err)
+ }
- out, err := mem.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1), pool)
- if err != nil {
- out.Free()
- return nil, 0, err
+ // Read at most one byte more than the limit from the decompressor.
+ // Unless the limit is MaxInt64, in which case, that's impossible, so
+ // apply no limit.
+ if limit := int64(maxReceiveMessageSize); limit < math.MaxInt64 {
+ dcReader = io.LimitReader(dcReader, limit+1)
+ }
+ out, err := mem.ReadAll(dcReader, pool)
+ if err != nil {
+ out.Free()
+ return nil, status.Errorf(codes.Internal, "grpc: failed to read decompressed data: %v", err)
+ }
+
+ if out.Len() > maxReceiveMessageSize {
+ out.Free()
+ return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max %d", maxReceiveMessageSize)
+ }
+ return out, nil
}
- return out, out.Len(), nil
+ return nil, status.Errorf(codes.Internal, "grpc: no decompressor available for compressed payload")
}
type recvCompressor interface {
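
decompress now owns both the legacy Decompressor path and the registered encoding.Compressor path, wraps every failure in a status error, and reads at most maxReceiveMessageSize+1 bytes so an oversized message is rejected without buffering it in full. A standalone illustration of that read-one-extra-byte limit check, using gzip as a stand-in compressor and arbitrary sizes:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
)

// readLimited decompresses r but reads at most limit+1 bytes, so it can tell
// "fits exactly" apart from "too large" without unbounded buffering — the
// same trick decompress applies above with io.LimitReader.
func readLimited(r io.Reader, limit int64) ([]byte, error) {
	zr, err := gzip.NewReader(r)
	if err != nil {
		return nil, err
	}
	out, err := io.ReadAll(io.LimitReader(zr, limit+1))
	if err != nil {
		return nil, err
	}
	if int64(len(out)) > limit {
		return nil, fmt.Errorf("message after decompression larger than max %d", limit)
	}
	return out, nil
}

func main() {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write(bytes.Repeat([]byte("x"), 32))
	zw.Close()

	if _, err := readLimited(bytes.NewReader(buf.Bytes()), 16); err != nil {
		fmt.Println("rejected:", err) // 32 decompressed bytes exceed the 16-byte limit
	}
}
```
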
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index 16065a027..976e70ae0 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -37,12 +37,14 @@ import (
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/encoding"
"google.golang.org/grpc/encoding/proto"
+ estats "google.golang.org/grpc/experimental/stats"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/binarylog"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/grpcutil"
+ istats "google.golang.org/grpc/internal/stats"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/mem"
@@ -82,6 +84,9 @@ func init() {
internal.BinaryLogger = binaryLogger
internal.JoinServerOptions = newJoinServerOption
internal.BufferPool = bufferPool
+ internal.MetricsRecorderForServer = func(srv *Server) estats.MetricsRecorder {
+ return istats.NewMetricsRecorderList(srv.opts.statsHandlers)
+ }
}
var statusOK = status.New(codes.OK, "")
@@ -643,7 +648,7 @@ func (s *Server) serverWorker() {
// connections to reduce the time spent overall on runtime.morestack.
func (s *Server) initServerWorkers() {
s.serverWorkerChannel = make(chan func())
- s.serverWorkerChannelClose = grpcsync.OnceFunc(func() {
+ s.serverWorkerChannelClose = sync.OnceFunc(func() {
close(s.serverWorkerChannel)
})
for i := uint32(0); i < s.opts.numServerWorkers; i++ {
@@ -1360,8 +1365,16 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt
}
return err
}
- defer d.Free()
+ freed := false
+ dataFree := func() {
+ if !freed {
+ d.Free()
+ freed = true
+ }
+ }
+ defer dataFree()
df := func(v any) error {
+ defer dataFree()
if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil {
return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
}
@@ -1637,10 +1650,10 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv
// If dc is set and matches the stream's compression, use it. Otherwise, try
// to find a matching registered compressor for decomp.
if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
- ss.dc = s.opts.dc
+ ss.decompressorV0 = s.opts.dc
} else if rc != "" && rc != encoding.Identity {
- ss.decomp = encoding.GetCompressor(rc)
- if ss.decomp == nil {
+ ss.decompressorV1 = encoding.GetCompressor(rc)
+ if ss.decompressorV1 == nil {
st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
ss.s.WriteStatus(st)
return st.Err()
@@ -1652,12 +1665,12 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv
//
// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
if s.opts.cp != nil {
- ss.cp = s.opts.cp
+ ss.compressorV0 = s.opts.cp
ss.sendCompressorName = s.opts.cp.Type()
} else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
// Legacy compressor not specified; attempt to respond with same encoding.
- ss.comp = encoding.GetCompressor(rc)
- if ss.comp != nil {
+ ss.compressorV1 = encoding.GetCompressor(rc)
+ if ss.compressorV1 != nil {
ss.sendCompressorName = rc
}
}
@@ -1668,7 +1681,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv
}
}
- ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.cp, ss.comp)
+ ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.compressorV0, ss.compressorV1)
if trInfo != nil {
trInfo.tr.LazyLog(&trInfo.firstLine, false)
@@ -1922,7 +1935,7 @@ func (s *Server) stop(graceful bool) {
s.conns = nil
if s.opts.numServerWorkers > 0 {
- // Closing the channel (only once, via grpcsync.OnceFunc) after all the
+ // Closing the channel (only once, via sync.OnceFunc) after all the
// connections have been closed above ensures that there are no
// goroutines executing the callback passed to st.HandleStreams (where
// the channel is written to).
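
In processUnaryRPC, the request buffer is no longer held until the handler returns: the dataFree closure frees it once, either right after Unmarshal inside df or via the deferred call on error paths. A small sketch of that call-once free pattern with an illustrative buffer type (not the gRPC mem package):

```go
package main

import "fmt"

type buffer struct{ freed bool }

func (b *buffer) Free() { b.freed = true; fmt.Println("buffer freed") }

func main() {
	d := &buffer{}

	// freeOnce mirrors the dataFree closure above: callable from both the
	// decode path and the deferred cleanup, but the buffer is freed only once.
	freed := false
	freeOnce := func() {
		if !freed {
			d.Free()
			freed = true
		}
	}
	defer freeOnce() // covers the error path where decoding never runs

	decode := func() {
		defer freeOnce() // release the wire bytes as soon as decoding is done
		fmt.Println("unmarshal into the request message")
	}
	decode()

	fmt.Println("handler runs without holding the request buffer")
}
```
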
diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go
index 7e83027d1..8d451e07c 100644
--- a/vendor/google.golang.org/grpc/service_config.go
+++ b/vendor/google.golang.org/grpc/service_config.go
@@ -268,18 +268,21 @@ func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult {
return &serviceconfig.ParseResult{Config: &sc}
}
+func isValidRetryPolicy(jrp *jsonRetryPolicy) bool {
+ return jrp.MaxAttempts > 1 &&
+ jrp.InitialBackoff > 0 &&
+ jrp.MaxBackoff > 0 &&
+ jrp.BackoffMultiplier > 0 &&
+ len(jrp.RetryableStatusCodes) > 0
+}
+
func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalserviceconfig.RetryPolicy, err error) {
if jrp == nil {
return nil, nil
}
- if jrp.MaxAttempts <= 1 ||
- jrp.InitialBackoff <= 0 ||
- jrp.MaxBackoff <= 0 ||
- jrp.BackoffMultiplier <= 0 ||
- len(jrp.RetryableStatusCodes) == 0 {
- logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp)
- return nil, nil
+ if !isValidRetryPolicy(jrp) {
+ return nil, fmt.Errorf("invalid retry policy (%+v): ", jrp)
}
if jrp.MaxAttempts < maxAttempts {
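
An invalid retryPolicy is no longer dropped with a warning; convertRetryPolicy returns an error, so a bad policy now surfaces as a service-config error instead of being silently ignored. A hedged example of a policy that passes isValidRetryPolicy, supplied through WithDefaultServiceConfig; the service name, target, and values are illustrative:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// maxAttempts > 1, positive backoffs and multiplier, and a non-empty
	// retryableStatusCodes list are all required, or the policy is rejected.
	const svcCfg = `{
	  "methodConfig": [{
	    "name": [{"service": "echo.Echo"}],
	    "retryPolicy": {
	      "maxAttempts": 4,
	      "initialBackoff": "0.1s",
	      "maxBackoff": "1s",
	      "backoffMultiplier": 2.0,
	      "retryableStatusCodes": ["UNAVAILABLE"]
	    }
	  }]
	}`

	conn, err := grpc.NewClient(
		"dns:///echo.example.com:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(svcCfg),
	)
	if err != nil {
		log.Fatalf("grpc.NewClient: %v", err)
	}
	defer conn.Close()
}
```
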
diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go
index 6f20d2d54..baf7740ef 100644
--- a/vendor/google.golang.org/grpc/stats/stats.go
+++ b/vendor/google.golang.org/grpc/stats/stats.go
@@ -36,7 +36,12 @@ type RPCStats interface {
IsClient() bool
}
-// Begin contains stats when an RPC attempt begins.
+// Begin contains stats for the start of an RPC attempt.
+//
+// - Server-side: Triggered after `InHeader`, as headers are processed
+// before the RPC lifecycle begins.
+// - Client-side: The first stats event recorded.
+//
// FailFast is only valid if this Begin is from client side.
type Begin struct {
// Client is true if this Begin is from client side.
@@ -69,7 +74,7 @@ func (*PickerUpdated) IsClient() bool { return true }
func (*PickerUpdated) isRPCStats() {}
-// InPayload contains the information for an incoming payload.
+// InPayload contains stats about an incoming payload.
type InPayload struct {
// Client is true if this InPayload is from client side.
Client bool
@@ -98,7 +103,9 @@ func (s *InPayload) IsClient() bool { return s.Client }
func (s *InPayload) isRPCStats() {}
-// InHeader contains stats when a header is received.
+// InHeader contains stats about header reception.
+//
+// - Server-side: The first stats event after the RPC request is received.
type InHeader struct {
// Client is true if this InHeader is from client side.
Client bool
@@ -123,7 +130,7 @@ func (s *InHeader) IsClient() bool { return s.Client }
func (s *InHeader) isRPCStats() {}
-// InTrailer contains stats when a trailer is received.
+// InTrailer contains stats about trailer reception.
type InTrailer struct {
// Client is true if this InTrailer is from client side.
Client bool
@@ -139,7 +146,7 @@ func (s *InTrailer) IsClient() bool { return s.Client }
func (s *InTrailer) isRPCStats() {}
-// OutPayload contains the information for an outgoing payload.
+// OutPayload contains stats about an outgoing payload.
type OutPayload struct {
// Client is true if this OutPayload is from client side.
Client bool
@@ -166,7 +173,10 @@ func (s *OutPayload) IsClient() bool { return s.Client }
func (s *OutPayload) isRPCStats() {}
-// OutHeader contains stats when a header is sent.
+// OutHeader contains stats about header transmission.
+//
+// - Client-side: Only occurs after 'Begin', as headers are always the first
+// thing sent on a stream.
type OutHeader struct {
// Client is true if this OutHeader is from client side.
Client bool
@@ -189,14 +199,15 @@ func (s *OutHeader) IsClient() bool { return s.Client }
func (s *OutHeader) isRPCStats() {}
-// OutTrailer contains stats when a trailer is sent.
+// OutTrailer contains stats about trailer transmission.
type OutTrailer struct {
// Client is true if this OutTrailer is from client side.
Client bool
// WireLength is the wire length of trailer.
//
- // Deprecated: This field is never set. The length is not known when this message is
- // emitted because the trailer fields are compressed with hpack after that.
+ // Deprecated: This field is never set. The length is not known when this
+ // message is emitted because the trailer fields are compressed with hpack
+ // after that.
WireLength int
// Trailer contains the trailer metadata sent to the client. This
// field is only valid if this OutTrailer is from the server side.
@@ -208,7 +219,7 @@ func (s *OutTrailer) IsClient() bool { return s.Client }
func (s *OutTrailer) isRPCStats() {}
-// End contains stats when an RPC ends.
+// End contains stats about RPC completion.
type End struct {
// Client is true if this End is from client side.
Client bool
@@ -238,7 +249,7 @@ type ConnStats interface {
IsClient() bool
}
-// ConnBegin contains the stats of a connection when it is established.
+// ConnBegin contains stats about connection establishment.
type ConnBegin struct {
// Client is true if this ConnBegin is from client side.
Client bool
@@ -249,7 +260,7 @@ func (s *ConnBegin) IsClient() bool { return s.Client }
func (s *ConnBegin) isConnStats() {}
-// ConnEnd contains the stats of a connection when it ends.
+// ConnEnd contains stats about connection termination.
type ConnEnd struct {
// Client is true if this ConnEnd is from client side.
Client bool
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index 17e2267b3..12163150b 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -258,9 +258,9 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
}
func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) {
- c := defaultCallInfo()
+ callInfo := defaultCallInfo()
if mc.WaitForReady != nil {
- c.failFast = !*mc.WaitForReady
+ callInfo.failFast = !*mc.WaitForReady
}
// Possible context leak:
@@ -281,20 +281,20 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
}()
for _, o := range opts {
- if err := o.before(c); err != nil {
+ if err := o.before(callInfo); err != nil {
return nil, toRPCErr(err)
}
}
- c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
- c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
- if err := setCallInfoCodec(c); err != nil {
+ callInfo.maxSendMessageSize = getMaxSize(mc.MaxReqSize, callInfo.maxSendMessageSize, defaultClientMaxSendMessageSize)
+ callInfo.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, callInfo.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
+ if err := setCallInfoCodec(callInfo); err != nil {
return nil, err
}
callHdr := &transport.CallHdr{
Host: cc.authority,
Method: method,
- ContentSubtype: c.contentSubtype,
+ ContentSubtype: callInfo.contentSubtype,
DoneFunc: doneFunc,
}
@@ -302,22 +302,22 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
// set. In that case, also find the compressor from the encoding package.
// Otherwise, use the compressor configured by the WithCompressor DialOption,
// if set.
- var cp Compressor
- var comp encoding.Compressor
- if ct := c.compressorType; ct != "" {
+ var compressorV0 Compressor
+ var compressorV1 encoding.Compressor
+ if ct := callInfo.compressorName; ct != "" {
callHdr.SendCompress = ct
if ct != encoding.Identity {
- comp = encoding.GetCompressor(ct)
- if comp == nil {
+ compressorV1 = encoding.GetCompressor(ct)
+ if compressorV1 == nil {
return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
}
}
- } else if cc.dopts.cp != nil {
- callHdr.SendCompress = cc.dopts.cp.Type()
- cp = cc.dopts.cp
+ } else if cc.dopts.compressorV0 != nil {
+ callHdr.SendCompress = cc.dopts.compressorV0.Type()
+ compressorV0 = cc.dopts.compressorV0
}
- if c.creds != nil {
- callHdr.Creds = c.creds
+ if callInfo.creds != nil {
+ callHdr.Creds = callInfo.creds
}
cs := &clientStream{
@@ -325,12 +325,12 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
ctx: ctx,
methodConfig: &mc,
opts: opts,
- callInfo: c,
+ callInfo: callInfo,
cc: cc,
desc: desc,
- codec: c.codec,
- cp: cp,
- comp: comp,
+ codec: callInfo.codec,
+ compressorV0: compressorV0,
+ compressorV1: compressorV1,
cancel: cancel,
firstAttempt: true,
onCommit: onCommit,
@@ -412,7 +412,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)
return nil, ErrClientConnClosing
}
- ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp)
+ ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.compressorV0, cs.compressorV1)
method := cs.callHdr.Method
var beginTime time.Time
shs := cs.cc.dopts.copts.StatsHandlers
@@ -454,12 +454,12 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)
}
return &csAttempt{
- ctx: ctx,
- beginTime: beginTime,
- cs: cs,
- dc: cs.cc.dopts.dc,
- statsHandlers: shs,
- trInfo: trInfo,
+ ctx: ctx,
+ beginTime: beginTime,
+ cs: cs,
+ decompressorV0: cs.cc.dopts.dc,
+ statsHandlers: shs,
+ trInfo: trInfo,
}, nil
}
@@ -467,7 +467,7 @@ func (a *csAttempt) getTransport() error {
cs := a.cs
var err error
- a.t, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method)
+ a.transport, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method)
if err != nil {
if de, ok := err.(dropError); ok {
err = de.error
@@ -476,7 +476,7 @@ func (a *csAttempt) getTransport() error {
return err
}
if a.trInfo != nil {
- a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr())
+ a.trInfo.firstLine.SetRemoteAddr(a.transport.RemoteAddr())
}
return nil
}
@@ -503,7 +503,7 @@ func (a *csAttempt) newStream() error {
a.ctx = metadata.NewOutgoingContext(a.ctx, md)
}
- s, err := a.t.NewStream(a.ctx, cs.callHdr)
+ s, err := a.transport.NewStream(a.ctx, cs.callHdr)
if err != nil {
nse, ok := err.(*transport.NewStreamError)
if !ok {
@@ -518,9 +518,9 @@ func (a *csAttempt) newStream() error {
// Unwrap and convert error.
return toRPCErr(nse.Err)
}
- a.s = s
+ a.transportStream = s
a.ctx = s.Context()
- a.p = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool}
+ a.parser = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool}
return nil
}
@@ -532,9 +532,9 @@ type clientStream struct {
cc *ClientConn
desc *StreamDesc
- codec baseCodec
- cp Compressor
- comp encoding.Compressor
+ codec baseCodec
+ compressorV0 Compressor
+ compressorV1 encoding.Compressor
cancel context.CancelFunc // cancels all attempts
@@ -583,17 +583,17 @@ type replayOp struct {
// csAttempt implements a single transport stream attempt within a
// clientStream.
type csAttempt struct {
- ctx context.Context
- cs *clientStream
- t transport.ClientTransport
- s *transport.ClientStream
- p *parser
- pickResult balancer.PickResult
-
- finished bool
- dc Decompressor
- decomp encoding.Compressor
- decompSet bool
+ ctx context.Context
+ cs *clientStream
+ transport transport.ClientTransport
+ transportStream *transport.ClientStream
+ parser *parser
+ pickResult balancer.PickResult
+
+ finished bool
+ decompressorV0 Decompressor
+ decompressorV1 encoding.Compressor
+ decompressorSet bool
mu sync.Mutex // guards trInfo.tr
// trInfo may be nil (if EnableTracing is false).
@@ -639,14 +639,14 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) {
// RPC is finished or committed or was dropped by the picker; cannot retry.
return false, err
}
- if a.s == nil && a.allowTransparentRetry {
+ if a.transportStream == nil && a.allowTransparentRetry {
return true, nil
}
// Wait for the trailers.
unprocessed := false
- if a.s != nil {
- <-a.s.Done()
- unprocessed = a.s.Unprocessed()
+ if a.transportStream != nil {
+ <-a.transportStream.Done()
+ unprocessed = a.transportStream.Unprocessed()
}
if cs.firstAttempt && unprocessed {
// First attempt, stream unprocessed: transparently retry.
@@ -658,14 +658,14 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) {
pushback := 0
hasPushback := false
- if a.s != nil {
- if !a.s.TrailersOnly() {
+ if a.transportStream != nil {
+ if !a.transportStream.TrailersOnly() {
return false, err
}
// TODO(retry): Move down if the spec changes to not check server pushback
// before considering this a failure for throttling.
- sps := a.s.Trailer()["grpc-retry-pushback-ms"]
+ sps := a.transportStream.Trailer()["grpc-retry-pushback-ms"]
if len(sps) == 1 {
var e error
if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 {
@@ -682,8 +682,8 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) {
}
var code codes.Code
- if a.s != nil {
- code = a.s.Status().Code()
+ if a.transportStream != nil {
+ code = a.transportStream.Status().Code()
} else {
code = status.Code(err)
}
@@ -756,8 +756,8 @@ func (cs *clientStream) Context() context.Context {
cs.commitAttempt()
// No need to lock before using attempt, since we know it is committed and
// cannot change.
- if cs.attempt.s != nil {
- return cs.attempt.s.Context()
+ if cs.attempt.transportStream != nil {
+ return cs.attempt.transportStream.Context()
}
return cs.ctx
}
@@ -794,9 +794,9 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func())
continue
}
if err == io.EOF {
- <-a.s.Done()
+ <-a.transportStream.Done()
}
- if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) {
+ if err == nil || (err == io.EOF && a.transportStream.Status().Code() == codes.OK) {
onSuccess()
cs.mu.Unlock()
return err
@@ -812,7 +812,7 @@ func (cs *clientStream) Header() (metadata.MD, error) {
var m metadata.MD
err := cs.withRetry(func(a *csAttempt) error {
var err error
- m, err = a.s.Header()
+ m, err = a.transportStream.Header()
return toRPCErr(err)
}, cs.commitAttemptLocked)
@@ -856,10 +856,10 @@ func (cs *clientStream) Trailer() metadata.MD {
// directions -- it will prevent races and should not meaningfully impact
// performance.
cs.commitAttempt()
- if cs.attempt.s == nil {
+ if cs.attempt.transportStream == nil {
return nil
}
- return cs.attempt.s.Trailer()
+ return cs.attempt.transportStream.Trailer()
}
func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error {
@@ -904,7 +904,7 @@ func (cs *clientStream) SendMsg(m any) (err error) {
}
// load hdr, payload, data
- hdr, data, payload, pf, err := prepareMsg(m, cs.codec, cs.cp, cs.comp, cs.cc.dopts.copts.BufferPool)
+ hdr, data, payload, pf, err := prepareMsg(m, cs.codec, cs.compressorV0, cs.compressorV1, cs.cc.dopts.copts.BufferPool)
if err != nil {
return err
}
@@ -992,7 +992,7 @@ func (cs *clientStream) CloseSend() error {
}
cs.sentLast = true
op := func(a *csAttempt) error {
- a.s.Write(nil, nil, &transport.WriteOptions{Last: true})
+ a.transportStream.Write(nil, nil, &transport.WriteOptions{Last: true})
// Always return nil; io.EOF is the only error that might make sense
// instead, but there is no need to signal the client to call RecvMsg
// as the only use left for the stream after CloseSend is to call
@@ -1030,7 +1030,7 @@ func (cs *clientStream) finish(err error) {
if cs.attempt != nil {
cs.attempt.finish(err)
// after functions all rely upon having a stream.
- if cs.attempt.s != nil {
+ if cs.attempt.transportStream != nil {
for _, o := range cs.opts {
o.after(cs.callInfo, cs.attempt)
}
@@ -1084,7 +1084,7 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength
}
a.mu.Unlock()
}
- if err := a.s.Write(hdr, payld, &transport.WriteOptions{Last: !cs.desc.ClientStreams}); err != nil {
+ if err := a.transportStream.Write(hdr, payld, &transport.WriteOptions{Last: !cs.desc.ClientStreams}); err != nil {
if !cs.desc.ClientStreams {
// For non-client-streaming RPCs, we return nil instead of EOF on error
// because the generated code requires it. finish is not called; RecvMsg()
@@ -1108,25 +1108,25 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
defer payInfo.free()
}
- if !a.decompSet {
+ if !a.decompressorSet {
// Block until we receive headers containing received message encoding.
- if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity {
- if a.dc == nil || a.dc.Type() != ct {
+ if ct := a.transportStream.RecvCompress(); ct != "" && ct != encoding.Identity {
+ if a.decompressorV0 == nil || a.decompressorV0.Type() != ct {
// No configured decompressor, or it does not match the incoming
// message encoding; attempt to find a registered compressor that does.
- a.dc = nil
- a.decomp = encoding.GetCompressor(ct)
+ a.decompressorV0 = nil
+ a.decompressorV1 = encoding.GetCompressor(ct)
}
} else {
// No compression is used; disable our decompressor.
- a.dc = nil
+ a.decompressorV0 = nil
}
// Only initialize this state once per stream.
- a.decompSet = true
+ a.decompressorSet = true
}
- if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp, false); err != nil {
+ if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decompressorV1, false); err != nil {
if err == io.EOF {
- if statusErr := a.s.Status().Err(); statusErr != nil {
+ if statusErr := a.transportStream.Status().Err(); statusErr != nil {
return statusErr
}
return io.EOF // indicates successful end of stream.
@@ -1157,8 +1157,8 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
}
// Special handling for non-server-stream rpcs.
// This recv expects EOF or errors, so we don't collect inPayload.
- if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp, false); err == io.EOF {
- return a.s.Status().Err() // non-server streaming Recv returns nil on success
+ if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decompressorV1, false); err == io.EOF {
+ return a.transportStream.Status().Err() // non-server streaming Recv returns nil on success
} else if err != nil {
return toRPCErr(err)
}
@@ -1177,20 +1177,20 @@ func (a *csAttempt) finish(err error) {
err = nil
}
var tr metadata.MD
- if a.s != nil {
- a.s.Close(err)
- tr = a.s.Trailer()
+ if a.transportStream != nil {
+ a.transportStream.Close(err)
+ tr = a.transportStream.Trailer()
}
if a.pickResult.Done != nil {
br := false
- if a.s != nil {
- br = a.s.BytesReceived()
+ if a.transportStream != nil {
+ br = a.transportStream.BytesReceived()
}
a.pickResult.Done(balancer.DoneInfo{
Err: err,
Trailer: tr,
- BytesSent: a.s != nil,
+ BytesSent: a.transportStream != nil,
BytesReceived: br,
ServerLoad: balancerload.Parse(tr),
})
@@ -1272,7 +1272,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
// if set.
var cp Compressor
var comp encoding.Compressor
- if ct := c.compressorType; ct != "" {
+ if ct := c.compressorName; ct != "" {
callHdr.SendCompress = ct
if ct != encoding.Identity {
comp = encoding.GetCompressor(ct)
@@ -1280,9 +1280,9 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
}
}
- } else if ac.cc.dopts.cp != nil {
- callHdr.SendCompress = ac.cc.dopts.cp.Type()
- cp = ac.cc.dopts.cp
+ } else if ac.cc.dopts.compressorV0 != nil {
+ callHdr.SendCompress = ac.cc.dopts.compressorV0.Type()
+ cp = ac.cc.dopts.compressorV0
}
if c.creds != nil {
callHdr.Creds = c.creds
@@ -1290,26 +1290,26 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
// Use a special addrConnStream to avoid retry.
as := &addrConnStream{
- callHdr: callHdr,
- ac: ac,
- ctx: ctx,
- cancel: cancel,
- opts: opts,
- callInfo: c,
- desc: desc,
- codec: c.codec,
- cp: cp,
- comp: comp,
- t: t,
- }
-
- s, err := as.t.NewStream(as.ctx, as.callHdr)
+ callHdr: callHdr,
+ ac: ac,
+ ctx: ctx,
+ cancel: cancel,
+ opts: opts,
+ callInfo: c,
+ desc: desc,
+ codec: c.codec,
+ sendCompressorV0: cp,
+ sendCompressorV1: comp,
+ transport: t,
+ }
+
+ s, err := as.transport.NewStream(as.ctx, as.callHdr)
if err != nil {
err = toRPCErr(err)
return nil, err
}
- as.s = s
- as.p = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool}
+ as.transportStream = s
+ as.parser = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool}
ac.incrCallsStarted()
if desc != unaryStreamDesc {
// Listen on stream context to cleanup when the stream context is
@@ -1335,29 +1335,31 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
}
type addrConnStream struct {
- s *transport.ClientStream
- ac *addrConn
- callHdr *transport.CallHdr
- cancel context.CancelFunc
- opts []CallOption
- callInfo *callInfo
- t transport.ClientTransport
- ctx context.Context
- sentLast bool
- desc *StreamDesc
- codec baseCodec
- cp Compressor
- comp encoding.Compressor
- decompSet bool
- dc Decompressor
- decomp encoding.Compressor
- p *parser
- mu sync.Mutex
- finished bool
+ transportStream *transport.ClientStream
+ ac *addrConn
+ callHdr *transport.CallHdr
+ cancel context.CancelFunc
+ opts []CallOption
+ callInfo *callInfo
+ transport transport.ClientTransport
+ ctx context.Context
+ sentLast bool
+ desc *StreamDesc
+ codec baseCodec
+ sendCompressorV0 Compressor
+ sendCompressorV1 encoding.Compressor
+ decompressorSet bool
+ decompressorV0 Decompressor
+ decompressorV1 encoding.Compressor
+ parser *parser
+
+ // mu guards finished and is held for the entire finish method.
+ mu sync.Mutex
+ finished bool
}
func (as *addrConnStream) Header() (metadata.MD, error) {
- m, err := as.s.Header()
+ m, err := as.transportStream.Header()
if err != nil {
as.finish(toRPCErr(err))
}
@@ -1365,7 +1367,7 @@ func (as *addrConnStream) Header() (metadata.MD, error) {
}
func (as *addrConnStream) Trailer() metadata.MD {
- return as.s.Trailer()
+ return as.transportStream.Trailer()
}
func (as *addrConnStream) CloseSend() error {
@@ -1375,7 +1377,7 @@ func (as *addrConnStream) CloseSend() error {
}
as.sentLast = true
- as.s.Write(nil, nil, &transport.WriteOptions{Last: true})
+ as.transportStream.Write(nil, nil, &transport.WriteOptions{Last: true})
// Always return nil; io.EOF is the only error that might make sense
// instead, but there is no need to signal the client to call RecvMsg
// as the only use left for the stream after CloseSend is to call
@@ -1384,7 +1386,7 @@ func (as *addrConnStream) CloseSend() error {
}
func (as *addrConnStream) Context() context.Context {
- return as.s.Context()
+ return as.transportStream.Context()
}
func (as *addrConnStream) SendMsg(m any) (err error) {
@@ -1406,7 +1408,7 @@ func (as *addrConnStream) SendMsg(m any) (err error) {
}
// load hdr, payload, data
- hdr, data, payload, pf, err := prepareMsg(m, as.codec, as.cp, as.comp, as.ac.dopts.copts.BufferPool)
+ hdr, data, payload, pf, err := prepareMsg(m, as.codec, as.sendCompressorV0, as.sendCompressorV1, as.ac.dopts.copts.BufferPool)
if err != nil {
return err
}
@@ -1425,7 +1427,7 @@ func (as *addrConnStream) SendMsg(m any) (err error) {
return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payload.Len(), *as.callInfo.maxSendMessageSize)
}
- if err := as.s.Write(hdr, payload, &transport.WriteOptions{Last: !as.desc.ClientStreams}); err != nil {
+ if err := as.transportStream.Write(hdr, payload, &transport.WriteOptions{Last: !as.desc.ClientStreams}); err != nil {
if !as.desc.ClientStreams {
// For non-client-streaming RPCs, we return nil instead of EOF on error
// because the generated code requires it. finish is not called; RecvMsg()
@@ -1446,25 +1448,25 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
}
}()
- if !as.decompSet {
+ if !as.decompressorSet {
// Block until we receive headers containing received message encoding.
- if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity {
- if as.dc == nil || as.dc.Type() != ct {
+ if ct := as.transportStream.RecvCompress(); ct != "" && ct != encoding.Identity {
+ if as.decompressorV0 == nil || as.decompressorV0.Type() != ct {
// No configured decompressor, or it does not match the incoming
// message encoding; attempt to find a registered compressor that does.
- as.dc = nil
- as.decomp = encoding.GetCompressor(ct)
+ as.decompressorV0 = nil
+ as.decompressorV1 = encoding.GetCompressor(ct)
}
} else {
// No compression is used; disable our decompressor.
- as.dc = nil
+ as.decompressorV0 = nil
}
// Only initialize this state once per stream.
- as.decompSet = true
+ as.decompressorSet = true
}
- if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err != nil {
+ if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err != nil {
if err == io.EOF {
- if statusErr := as.s.Status().Err(); statusErr != nil {
+ if statusErr := as.transportStream.Status().Err(); statusErr != nil {
return statusErr
}
return io.EOF // indicates successful end of stream.
@@ -1479,8 +1481,8 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
// Special handling for non-server-stream rpcs.
// This recv expects EOF or errors, so we don't collect inPayload.
- if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err == io.EOF {
- return as.s.Status().Err() // non-server streaming Recv returns nil on success
+ if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err == io.EOF {
+ return as.transportStream.Status().Err() // non-server streaming Recv returns nil on success
} else if err != nil {
return toRPCErr(err)
}
@@ -1498,8 +1500,8 @@ func (as *addrConnStream) finish(err error) {
// Ending a stream with EOF indicates a success.
err = nil
}
- if as.s != nil {
- as.s.Close(err)
+ if as.transportStream != nil {
+ as.transportStream.Close(err)
}
if err != nil {
@@ -1570,10 +1572,10 @@ type serverStream struct {
p *parser
codec baseCodec
- cp Compressor
- dc Decompressor
- comp encoding.Compressor
- decomp encoding.Compressor
+ compressorV0 Compressor
+ compressorV1 encoding.Compressor
+ decompressorV0 Decompressor
+ decompressorV1 encoding.Compressor
sendCompressorName string
@@ -1669,12 +1671,12 @@ func (ss *serverStream) SendMsg(m any) (err error) {
// Server handler could have set new compressor by calling SetSendCompressor.
// In case it is set, we need to use it for compressing outbound message.
if sendCompressorsName := ss.s.SendCompress(); sendCompressorsName != ss.sendCompressorName {
- ss.comp = encoding.GetCompressor(sendCompressorsName)
+ ss.compressorV1 = encoding.GetCompressor(sendCompressorsName)
ss.sendCompressorName = sendCompressorsName
}
// load hdr, payload, data
- hdr, data, payload, pf, err := prepareMsg(m, ss.codec, ss.cp, ss.comp, ss.p.bufferPool)
+ hdr, data, payload, pf, err := prepareMsg(m, ss.codec, ss.compressorV0, ss.compressorV1, ss.p.bufferPool)
if err != nil {
return err
}
@@ -1755,7 +1757,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
payInfo = &payloadInfo{}
defer payInfo.free()
}
- if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp, true); err != nil {
+ if err := recv(ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, payInfo, ss.decompressorV1, true); err != nil {
if err == io.EOF {
if len(ss.binlogs) != 0 {
chc := &binarylog.ClientHalfClose{}
@@ -1766,7 +1768,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
return err
}
if err == io.ErrUnexpectedEOF {
- err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
+ err = status.Error(codes.Internal, io.ErrUnexpectedEOF.Error())
}
return toRPCErr(err)
}
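
Note on the stream.go hunks above (a hedged reading, not part of the upstream patch): the 1.72.x vendor bump renames the paired compressor fields so the two generations are explicit — `compressorV0`/`decompressorV0` are the legacy, explicitly configured `Compressor`/`Decompressor` values, while `compressorV1`/`decompressorV1` are resolved by name through the `encoding` registry. On receive, the legacy decompressor is kept only when its `Type()` matches the incoming grpc-encoding value; otherwise it is cleared and the name is looked up in the registry. The `status.Errorf` → `status.Error` swap passes the fixed message without a format string. The sketch below is a minimal stand-in for that selection rule, using simplified interfaces and a hypothetical `pickDecompressor` helper rather than the real grpc-go types:

```go
// Hedged sketch (not grpc-go itself): minimal stand-ins showing the selection
// rule the renamed fields encode. "V0" is the legacy, explicitly configured
// decompressor; "V1" is looked up by name from a registry, as
// encoding.GetCompressor does in the real code.
package main

import "fmt"

type legacyDecompressor interface { // stand-in for the "V0" Decompressor
	Type() string
}

type registeredCompressor interface { // stand-in for the "V1" encoding.Compressor
	Name() string
}

type gzipV0 struct{}

func (gzipV0) Type() string { return "gzip" }

type snappyV1 struct{}

func (snappyV1) Name() string { return "snappy" }

// registry is a stand-in for the encoding package's name -> compressor map.
var registry = map[string]registeredCompressor{"snappy": snappyV1{}}

// pickDecompressor mirrors the branch in RecvMsg: keep the legacy decompressor
// only if its Type matches the incoming grpc-encoding value; otherwise fall
// back to a registry lookup by name (which may return nil).
func pickDecompressor(incoming string, v0 legacyDecompressor) (legacyDecompressor, registeredCompressor) {
	if incoming == "" || incoming == "identity" {
		return nil, nil // no compression on the wire
	}
	if v0 != nil && v0.Type() == incoming {
		return v0, nil
	}
	return nil, registry[incoming]
}

func main() {
	v0, v1 := pickDecompressor("snappy", gzipV0{})
	fmt.Println(v0, v1) // <nil> {}: legacy gzip rejected, registry snappy chosen
}
```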
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index 6cbe116f2..51da8ed59 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
-const Version = "1.69.2"
+const Version = "1.72.1"
diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go
index 261ae41bd..bf1ae5948 100644
--- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go
@@ -25,6 +25,7 @@ import (
io "io"
proto "github.com/gogo/protobuf/proto"
+ k8s_io_api_admissionregistration_v1 "k8s.io/api/admissionregistration/v1"
v11 "k8s.io/api/admissionregistration/v1"
k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -46,10 +47,38 @@ var _ = math.Inf
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *ApplyConfiguration) Reset() { *m = ApplyConfiguration{} }
+func (*ApplyConfiguration) ProtoMessage() {}
+func (*ApplyConfiguration) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7f7c65a4f012fb19, []int{0}
+}
+func (m *ApplyConfiguration) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ApplyConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ApplyConfiguration) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ApplyConfiguration.Merge(m, src)
+}
+func (m *ApplyConfiguration) XXX_Size() int {
+ return m.Size()
+}
+func (m *ApplyConfiguration) XXX_DiscardUnknown() {
+ xxx_messageInfo_ApplyConfiguration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ApplyConfiguration proto.InternalMessageInfo
+
func (m *AuditAnnotation) Reset() { *m = AuditAnnotation{} }
func (*AuditAnnotation) ProtoMessage() {}
func (*AuditAnnotation) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{0}
+ return fileDescriptor_7f7c65a4f012fb19, []int{1}
}
func (m *AuditAnnotation) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -77,7 +106,7 @@ var xxx_messageInfo_AuditAnnotation proto.InternalMessageInfo
func (m *ExpressionWarning) Reset() { *m = ExpressionWarning{} }
func (*ExpressionWarning) ProtoMessage() {}
func (*ExpressionWarning) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{1}
+ return fileDescriptor_7f7c65a4f012fb19, []int{2}
}
func (m *ExpressionWarning) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -102,10 +131,38 @@ func (m *ExpressionWarning) XXX_DiscardUnknown() {
var xxx_messageInfo_ExpressionWarning proto.InternalMessageInfo
+func (m *JSONPatch) Reset() { *m = JSONPatch{} }
+func (*JSONPatch) ProtoMessage() {}
+func (*JSONPatch) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7f7c65a4f012fb19, []int{3}
+}
+func (m *JSONPatch) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *JSONPatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *JSONPatch) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_JSONPatch.Merge(m, src)
+}
+func (m *JSONPatch) XXX_Size() int {
+ return m.Size()
+}
+func (m *JSONPatch) XXX_DiscardUnknown() {
+ xxx_messageInfo_JSONPatch.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_JSONPatch proto.InternalMessageInfo
+
func (m *MatchCondition) Reset() { *m = MatchCondition{} }
func (*MatchCondition) ProtoMessage() {}
func (*MatchCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{2}
+ return fileDescriptor_7f7c65a4f012fb19, []int{4}
}
func (m *MatchCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -133,7 +190,7 @@ var xxx_messageInfo_MatchCondition proto.InternalMessageInfo
func (m *MatchResources) Reset() { *m = MatchResources{} }
func (*MatchResources) ProtoMessage() {}
func (*MatchResources) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{3}
+ return fileDescriptor_7f7c65a4f012fb19, []int{5}
}
func (m *MatchResources) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -158,10 +215,178 @@ func (m *MatchResources) XXX_DiscardUnknown() {
var xxx_messageInfo_MatchResources proto.InternalMessageInfo
+func (m *MutatingAdmissionPolicy) Reset() { *m = MutatingAdmissionPolicy{} }
+func (*MutatingAdmissionPolicy) ProtoMessage() {}
+func (*MutatingAdmissionPolicy) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7f7c65a4f012fb19, []int{6}
+}
+func (m *MutatingAdmissionPolicy) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MutatingAdmissionPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *MutatingAdmissionPolicy) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MutatingAdmissionPolicy.Merge(m, src)
+}
+func (m *MutatingAdmissionPolicy) XXX_Size() int {
+ return m.Size()
+}
+func (m *MutatingAdmissionPolicy) XXX_DiscardUnknown() {
+ xxx_messageInfo_MutatingAdmissionPolicy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MutatingAdmissionPolicy proto.InternalMessageInfo
+
+func (m *MutatingAdmissionPolicyBinding) Reset() { *m = MutatingAdmissionPolicyBinding{} }
+func (*MutatingAdmissionPolicyBinding) ProtoMessage() {}
+func (*MutatingAdmissionPolicyBinding) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7f7c65a4f012fb19, []int{7}
+}
+func (m *MutatingAdmissionPolicyBinding) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MutatingAdmissionPolicyBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *MutatingAdmissionPolicyBinding) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MutatingAdmissionPolicyBinding.Merge(m, src)
+}
+func (m *MutatingAdmissionPolicyBinding) XXX_Size() int {
+ return m.Size()
+}
+func (m *MutatingAdmissionPolicyBinding) XXX_DiscardUnknown() {
+ xxx_messageInfo_MutatingAdmissionPolicyBinding.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MutatingAdmissionPolicyBinding proto.InternalMessageInfo
+
+func (m *MutatingAdmissionPolicyBindingList) Reset() { *m = MutatingAdmissionPolicyBindingList{} }
+func (*MutatingAdmissionPolicyBindingList) ProtoMessage() {}
+func (*MutatingAdmissionPolicyBindingList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7f7c65a4f012fb19, []int{8}
+}
+func (m *MutatingAdmissionPolicyBindingList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MutatingAdmissionPolicyBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *MutatingAdmissionPolicyBindingList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MutatingAdmissionPolicyBindingList.Merge(m, src)
+}
+func (m *MutatingAdmissionPolicyBindingList) XXX_Size() int {
+ return m.Size()
+}
+func (m *MutatingAdmissionPolicyBindingList) XXX_DiscardUnknown() {
+ xxx_messageInfo_MutatingAdmissionPolicyBindingList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MutatingAdmissionPolicyBindingList proto.InternalMessageInfo
+
+func (m *MutatingAdmissionPolicyBindingSpec) Reset() { *m = MutatingAdmissionPolicyBindingSpec{} }
+func (*MutatingAdmissionPolicyBindingSpec) ProtoMessage() {}
+func (*MutatingAdmissionPolicyBindingSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7f7c65a4f012fb19, []int{9}
+}
+func (m *MutatingAdmissionPolicyBindingSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MutatingAdmissionPolicyBindingSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *MutatingAdmissionPolicyBindingSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MutatingAdmissionPolicyBindingSpec.Merge(m, src)
+}
+func (m *MutatingAdmissionPolicyBindingSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *MutatingAdmissionPolicyBindingSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_MutatingAdmissionPolicyBindingSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MutatingAdmissionPolicyBindingSpec proto.InternalMessageInfo
+
+func (m *MutatingAdmissionPolicyList) Reset() { *m = MutatingAdmissionPolicyList{} }
+func (*MutatingAdmissionPolicyList) ProtoMessage() {}
+func (*MutatingAdmissionPolicyList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7f7c65a4f012fb19, []int{10}
+}
+func (m *MutatingAdmissionPolicyList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MutatingAdmissionPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *MutatingAdmissionPolicyList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MutatingAdmissionPolicyList.Merge(m, src)
+}
+func (m *MutatingAdmissionPolicyList) XXX_Size() int {
+ return m.Size()
+}
+func (m *MutatingAdmissionPolicyList) XXX_DiscardUnknown() {
+ xxx_messageInfo_MutatingAdmissionPolicyList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MutatingAdmissionPolicyList proto.InternalMessageInfo
+
+func (m *MutatingAdmissionPolicySpec) Reset() { *m = MutatingAdmissionPolicySpec{} }
+func (*MutatingAdmissionPolicySpec) ProtoMessage() {}
+func (*MutatingAdmissionPolicySpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7f7c65a4f012fb19, []int{11}
+}
+func (m *MutatingAdmissionPolicySpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MutatingAdmissionPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *MutatingAdmissionPolicySpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MutatingAdmissionPolicySpec.Merge(m, src)
+}
+func (m *MutatingAdmissionPolicySpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *MutatingAdmissionPolicySpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_MutatingAdmissionPolicySpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MutatingAdmissionPolicySpec proto.InternalMessageInfo
+
func (m *MutatingWebhook) Reset() { *m = MutatingWebhook{} }
func (*MutatingWebhook) ProtoMessage() {}
func (*MutatingWebhook) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{4}
+ return fileDescriptor_7f7c65a4f012fb19, []int{12}
}
func (m *MutatingWebhook) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -189,7 +414,7 @@ var xxx_messageInfo_MutatingWebhook proto.InternalMessageInfo
func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} }
func (*MutatingWebhookConfiguration) ProtoMessage() {}
func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{5}
+ return fileDescriptor_7f7c65a4f012fb19, []int{13}
}
func (m *MutatingWebhookConfiguration) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -217,7 +442,7 @@ var xxx_messageInfo_MutatingWebhookConfiguration proto.InternalMessageInfo
func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} }
func (*MutatingWebhookConfigurationList) ProtoMessage() {}
func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{6}
+ return fileDescriptor_7f7c65a4f012fb19, []int{14}
}
func (m *MutatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -242,10 +467,38 @@ func (m *MutatingWebhookConfigurationList) XXX_DiscardUnknown() {
var xxx_messageInfo_MutatingWebhookConfigurationList proto.InternalMessageInfo
+func (m *Mutation) Reset() { *m = Mutation{} }
+func (*Mutation) ProtoMessage() {}
+func (*Mutation) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7f7c65a4f012fb19, []int{15}
+}
+func (m *Mutation) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Mutation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Mutation) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Mutation.Merge(m, src)
+}
+func (m *Mutation) XXX_Size() int {
+ return m.Size()
+}
+func (m *Mutation) XXX_DiscardUnknown() {
+ xxx_messageInfo_Mutation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Mutation proto.InternalMessageInfo
+
func (m *NamedRuleWithOperations) Reset() { *m = NamedRuleWithOperations{} }
func (*NamedRuleWithOperations) ProtoMessage() {}
func (*NamedRuleWithOperations) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{7}
+ return fileDescriptor_7f7c65a4f012fb19, []int{16}
}
func (m *NamedRuleWithOperations) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -273,7 +526,7 @@ var xxx_messageInfo_NamedRuleWithOperations proto.InternalMessageInfo
func (m *ParamKind) Reset() { *m = ParamKind{} }
func (*ParamKind) ProtoMessage() {}
func (*ParamKind) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{8}
+ return fileDescriptor_7f7c65a4f012fb19, []int{17}
}
func (m *ParamKind) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -301,7 +554,7 @@ var xxx_messageInfo_ParamKind proto.InternalMessageInfo
func (m *ParamRef) Reset() { *m = ParamRef{} }
func (*ParamRef) ProtoMessage() {}
func (*ParamRef) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{9}
+ return fileDescriptor_7f7c65a4f012fb19, []int{18}
}
func (m *ParamRef) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -329,7 +582,7 @@ var xxx_messageInfo_ParamRef proto.InternalMessageInfo
func (m *ServiceReference) Reset() { *m = ServiceReference{} }
func (*ServiceReference) ProtoMessage() {}
func (*ServiceReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{10}
+ return fileDescriptor_7f7c65a4f012fb19, []int{19}
}
func (m *ServiceReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -357,7 +610,7 @@ var xxx_messageInfo_ServiceReference proto.InternalMessageInfo
func (m *TypeChecking) Reset() { *m = TypeChecking{} }
func (*TypeChecking) ProtoMessage() {}
func (*TypeChecking) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{11}
+ return fileDescriptor_7f7c65a4f012fb19, []int{20}
}
func (m *TypeChecking) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -385,7 +638,7 @@ var xxx_messageInfo_TypeChecking proto.InternalMessageInfo
func (m *ValidatingAdmissionPolicy) Reset() { *m = ValidatingAdmissionPolicy{} }
func (*ValidatingAdmissionPolicy) ProtoMessage() {}
func (*ValidatingAdmissionPolicy) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{12}
+ return fileDescriptor_7f7c65a4f012fb19, []int{21}
}
func (m *ValidatingAdmissionPolicy) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -413,7 +666,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicy proto.InternalMessageInfo
func (m *ValidatingAdmissionPolicyBinding) Reset() { *m = ValidatingAdmissionPolicyBinding{} }
func (*ValidatingAdmissionPolicyBinding) ProtoMessage() {}
func (*ValidatingAdmissionPolicyBinding) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{13}
+ return fileDescriptor_7f7c65a4f012fb19, []int{22}
}
func (m *ValidatingAdmissionPolicyBinding) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -441,7 +694,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBinding proto.InternalMessageInfo
func (m *ValidatingAdmissionPolicyBindingList) Reset() { *m = ValidatingAdmissionPolicyBindingList{} }
func (*ValidatingAdmissionPolicyBindingList) ProtoMessage() {}
func (*ValidatingAdmissionPolicyBindingList) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{14}
+ return fileDescriptor_7f7c65a4f012fb19, []int{23}
}
func (m *ValidatingAdmissionPolicyBindingList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -469,7 +722,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBindingList proto.InternalMessageIn
func (m *ValidatingAdmissionPolicyBindingSpec) Reset() { *m = ValidatingAdmissionPolicyBindingSpec{} }
func (*ValidatingAdmissionPolicyBindingSpec) ProtoMessage() {}
func (*ValidatingAdmissionPolicyBindingSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{15}
+ return fileDescriptor_7f7c65a4f012fb19, []int{24}
}
func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -497,7 +750,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec proto.InternalMessageIn
func (m *ValidatingAdmissionPolicyList) Reset() { *m = ValidatingAdmissionPolicyList{} }
func (*ValidatingAdmissionPolicyList) ProtoMessage() {}
func (*ValidatingAdmissionPolicyList) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{16}
+ return fileDescriptor_7f7c65a4f012fb19, []int{25}
}
func (m *ValidatingAdmissionPolicyList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -525,7 +778,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyList proto.InternalMessageInfo
func (m *ValidatingAdmissionPolicySpec) Reset() { *m = ValidatingAdmissionPolicySpec{} }
func (*ValidatingAdmissionPolicySpec) ProtoMessage() {}
func (*ValidatingAdmissionPolicySpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{17}
+ return fileDescriptor_7f7c65a4f012fb19, []int{26}
}
func (m *ValidatingAdmissionPolicySpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -553,7 +806,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicySpec proto.InternalMessageInfo
func (m *ValidatingAdmissionPolicyStatus) Reset() { *m = ValidatingAdmissionPolicyStatus{} }
func (*ValidatingAdmissionPolicyStatus) ProtoMessage() {}
func (*ValidatingAdmissionPolicyStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{18}
+ return fileDescriptor_7f7c65a4f012fb19, []int{27}
}
func (m *ValidatingAdmissionPolicyStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -581,7 +834,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyStatus proto.InternalMessageInfo
func (m *ValidatingWebhook) Reset() { *m = ValidatingWebhook{} }
func (*ValidatingWebhook) ProtoMessage() {}
func (*ValidatingWebhook) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{19}
+ return fileDescriptor_7f7c65a4f012fb19, []int{28}
}
func (m *ValidatingWebhook) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -609,7 +862,7 @@ var xxx_messageInfo_ValidatingWebhook proto.InternalMessageInfo
func (m *ValidatingWebhookConfiguration) Reset() { *m = ValidatingWebhookConfiguration{} }
func (*ValidatingWebhookConfiguration) ProtoMessage() {}
func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{20}
+ return fileDescriptor_7f7c65a4f012fb19, []int{29}
}
func (m *ValidatingWebhookConfiguration) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -637,7 +890,7 @@ var xxx_messageInfo_ValidatingWebhookConfiguration proto.InternalMessageInfo
func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} }
func (*ValidatingWebhookConfigurationList) ProtoMessage() {}
func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{21}
+ return fileDescriptor_7f7c65a4f012fb19, []int{30}
}
func (m *ValidatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -665,7 +918,7 @@ var xxx_messageInfo_ValidatingWebhookConfigurationList proto.InternalMessageInfo
func (m *Validation) Reset() { *m = Validation{} }
func (*Validation) ProtoMessage() {}
func (*Validation) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{22}
+ return fileDescriptor_7f7c65a4f012fb19, []int{31}
}
func (m *Validation) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -693,7 +946,7 @@ var xxx_messageInfo_Validation proto.InternalMessageInfo
func (m *Variable) Reset() { *m = Variable{} }
func (*Variable) ProtoMessage() {}
func (*Variable) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{23}
+ return fileDescriptor_7f7c65a4f012fb19, []int{32}
}
func (m *Variable) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -721,7 +974,7 @@ var xxx_messageInfo_Variable proto.InternalMessageInfo
func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} }
func (*WebhookClientConfig) ProtoMessage() {}
func (*WebhookClientConfig) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{24}
+ return fileDescriptor_7f7c65a4f012fb19, []int{33}
}
func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -747,13 +1000,22 @@ func (m *WebhookClientConfig) XXX_DiscardUnknown() {
var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo
func init() {
+ proto.RegisterType((*ApplyConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.ApplyConfiguration")
proto.RegisterType((*AuditAnnotation)(nil), "k8s.io.api.admissionregistration.v1beta1.AuditAnnotation")
proto.RegisterType((*ExpressionWarning)(nil), "k8s.io.api.admissionregistration.v1beta1.ExpressionWarning")
+ proto.RegisterType((*JSONPatch)(nil), "k8s.io.api.admissionregistration.v1beta1.JSONPatch")
proto.RegisterType((*MatchCondition)(nil), "k8s.io.api.admissionregistration.v1beta1.MatchCondition")
proto.RegisterType((*MatchResources)(nil), "k8s.io.api.admissionregistration.v1beta1.MatchResources")
+ proto.RegisterType((*MutatingAdmissionPolicy)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicy")
+ proto.RegisterType((*MutatingAdmissionPolicyBinding)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicyBinding")
+ proto.RegisterType((*MutatingAdmissionPolicyBindingList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicyBindingList")
+ proto.RegisterType((*MutatingAdmissionPolicyBindingSpec)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicyBindingSpec")
+ proto.RegisterType((*MutatingAdmissionPolicyList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicyList")
+ proto.RegisterType((*MutatingAdmissionPolicySpec)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicySpec")
proto.RegisterType((*MutatingWebhook)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhook")
proto.RegisterType((*MutatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfiguration")
proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfigurationList")
+ proto.RegisterType((*Mutation)(nil), "k8s.io.api.admissionregistration.v1beta1.Mutation")
proto.RegisterType((*NamedRuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1beta1.NamedRuleWithOperations")
proto.RegisterType((*ParamKind)(nil), "k8s.io.api.admissionregistration.v1beta1.ParamKind")
proto.RegisterType((*ParamRef)(nil), "k8s.io.api.admissionregistration.v1beta1.ParamRef")
@@ -779,130 +1041,174 @@ func init() {
}
var fileDescriptor_7f7c65a4f012fb19 = []byte{
- // 1957 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x1a, 0x4d, 0x6f, 0x1b, 0xc7,
- 0xd5, 0x2b, 0x52, 0x12, 0xf9, 0xa8, 0x2f, 0x4e, 0x9c, 0x8a, 0x76, 0x1c, 0x52, 0x58, 0x04, 0x85,
- 0x0c, 0xb4, 0x64, 0xac, 0x04, 0x89, 0xeb, 0xa0, 0x28, 0x44, 0xc5, 0x76, 0xed, 0x58, 0xb2, 0x30,
- 0x4a, 0x24, 0xa0, 0x4d, 0x00, 0x8f, 0x76, 0x87, 0xe4, 0x96, 0xe4, 0xee, 0x76, 0x67, 0x49, 0x5b,
- 0x2d, 0xd0, 0x16, 0xe8, 0x21, 0xd7, 0x02, 0xbd, 0x14, 0xe8, 0xa9, 0x7f, 0xa1, 0xf7, 0x02, 0xed,
- 0xcd, 0xc7, 0xdc, 0x6a, 0xa0, 0x28, 0x51, 0xb1, 0x87, 0x9e, 0x7a, 0xe8, 0xa1, 0x3d, 0xe8, 0xd2,
- 0x62, 0x66, 0x67, 0x3f, 0xb9, 0xb4, 0x56, 0xaa, 0xac, 0x5c, 0x7c, 0xd3, 0xbe, 0xcf, 0x79, 0x6f,
- 0xde, 0xd7, 0x3c, 0x0a, 0x6e, 0x77, 0x6f, 0xb3, 0xba, 0x61, 0x35, 0x88, 0x6d, 0x34, 0x88, 0xde,
- 0x37, 0x18, 0x33, 0x2c, 0xd3, 0xa1, 0x6d, 0x83, 0xb9, 0x0e, 0x71, 0x0d, 0xcb, 0x6c, 0x0c, 0x6f,
- 0x1d, 0x52, 0x97, 0xdc, 0x6a, 0xb4, 0xa9, 0x49, 0x1d, 0xe2, 0x52, 0xbd, 0x6e, 0x3b, 0x96, 0x6b,
- 0xa1, 0x75, 0x8f, 0xb3, 0x4e, 0x6c, 0xa3, 0x9e, 0xca, 0x59, 0x97, 0x9c, 0xd7, 0xbf, 0xdd, 0x36,
- 0xdc, 0xce, 0xe0, 0xb0, 0xae, 0x59, 0xfd, 0x46, 0xdb, 0x6a, 0x5b, 0x0d, 0x21, 0xe0, 0x70, 0xd0,
- 0x12, 0x5f, 0xe2, 0x43, 0xfc, 0xe5, 0x09, 0xbe, 0xfe, 0x5e, 0x86, 0x23, 0x25, 0x4f, 0x73, 0xfd,
- 0xfd, 0x90, 0xa9, 0x4f, 0xb4, 0x8e, 0x61, 0x52, 0xe7, 0xa8, 0x61, 0x77, 0xdb, 0x1c, 0xc0, 0x1a,
- 0x7d, 0xea, 0x92, 0x34, 0xae, 0xc6, 0x34, 0x2e, 0x67, 0x60, 0xba, 0x46, 0x9f, 0x4e, 0x30, 0x7c,
- 0x70, 0x1a, 0x03, 0xd3, 0x3a, 0xb4, 0x4f, 0x92, 0x7c, 0x2a, 0x83, 0xe5, 0xcd, 0x81, 0x6e, 0xb8,
- 0x9b, 0xa6, 0x69, 0xb9, 0xc2, 0x08, 0xf4, 0x36, 0xe4, 0xba, 0xf4, 0xa8, 0xa2, 0xac, 0x29, 0xeb,
- 0xc5, 0x66, 0xe9, 0xf9, 0xa8, 0x76, 0x65, 0x3c, 0xaa, 0xe5, 0x3e, 0xa1, 0x47, 0x98, 0xc3, 0xd1,
- 0x26, 0x2c, 0x0f, 0x49, 0x6f, 0x40, 0xef, 0x3e, 0xb3, 0x1d, 0x2a, 0x5c, 0x50, 0x99, 0x11, 0xa4,
- 0xab, 0x92, 0x74, 0x79, 0x3f, 0x8e, 0xc6, 0x49, 0x7a, 0xb5, 0x07, 0xe5, 0xf0, 0xeb, 0x80, 0x38,
- 0xa6, 0x61, 0xb6, 0xd1, 0xb7, 0xa0, 0xd0, 0x32, 0x68, 0x4f, 0xc7, 0xb4, 0x25, 0x05, 0xae, 0x48,
- 0x81, 0x85, 0x7b, 0x12, 0x8e, 0x03, 0x0a, 0x74, 0x13, 0xe6, 0x9f, 0x7a, 0x8c, 0x95, 0x9c, 0x20,
- 0x5e, 0x96, 0xc4, 0xf3, 0x52, 0x1e, 0xf6, 0xf1, 0x6a, 0x0b, 0x96, 0xb6, 0x89, 0xab, 0x75, 0xb6,
- 0x2c, 0x53, 0x37, 0x84, 0x85, 0x6b, 0x90, 0x37, 0x49, 0x9f, 0x4a, 0x13, 0x17, 0x24, 0x67, 0x7e,
- 0x87, 0xf4, 0x29, 0x16, 0x18, 0xb4, 0x01, 0x40, 0x93, 0xf6, 0x21, 0x49, 0x07, 0x11, 0xd3, 0x22,
- 0x54, 0xea, 0x9f, 0xf3, 0x52, 0x11, 0xa6, 0xcc, 0x1a, 0x38, 0x1a, 0x65, 0xe8, 0x19, 0x94, 0xb9,
- 0x38, 0x66, 0x13, 0x8d, 0xee, 0xd1, 0x1e, 0xd5, 0x5c, 0xcb, 0x11, 0x5a, 0x4b, 0x1b, 0xef, 0xd5,
- 0xc3, 0x30, 0x0d, 0x6e, 0xac, 0x6e, 0x77, 0xdb, 0x1c, 0xc0, 0xea, 0x3c, 0x30, 0xea, 0xc3, 0x5b,
- 0xf5, 0x47, 0xe4, 0x90, 0xf6, 0x7c, 0xd6, 0xe6, 0x9b, 0xe3, 0x51, 0xad, 0xbc, 0x93, 0x94, 0x88,
- 0x27, 0x95, 0x20, 0x0b, 0x96, 0xac, 0xc3, 0x1f, 0x51, 0xcd, 0x0d, 0xd4, 0xce, 0x9c, 0x5f, 0x2d,
- 0x1a, 0x8f, 0x6a, 0x4b, 0x8f, 0x63, 0xe2, 0x70, 0x42, 0x3c, 0xfa, 0x19, 0x2c, 0x3a, 0xd2, 0x6e,
- 0x3c, 0xe8, 0x51, 0x56, 0xc9, 0xad, 0xe5, 0xd6, 0x4b, 0x1b, 0x9b, 0xf5, 0xac, 0xd9, 0x58, 0xe7,
- 0x76, 0xe9, 0x9c, 0xf7, 0xc0, 0x70, 0x3b, 0x8f, 0x6d, 0xea, 0xa1, 0x59, 0xf3, 0x4d, 0xe9, 0xf7,
- 0x45, 0x1c, 0x95, 0x8f, 0xe3, 0xea, 0xd0, 0xaf, 0x15, 0xb8, 0x4a, 0x9f, 0x69, 0xbd, 0x81, 0x4e,
- 0x63, 0x74, 0x95, 0xfc, 0x45, 0x9d, 0xe3, 0x86, 0x3c, 0xc7, 0xd5, 0xbb, 0x29, 0x6a, 0x70, 0xaa,
- 0x72, 0xf4, 0x31, 0x94, 0xfa, 0x3c, 0x24, 0x76, 0xad, 0x9e, 0xa1, 0x1d, 0x55, 0xe6, 0x45, 0x20,
- 0xa9, 0xe3, 0x51, 0xad, 0xb4, 0x1d, 0x82, 0x4f, 0x46, 0xb5, 0xe5, 0xc8, 0xe7, 0xa7, 0x47, 0x36,
- 0xc5, 0x51, 0x36, 0xf5, 0x4f, 0x05, 0x58, 0xde, 0x1e, 0xf0, 0xf4, 0x34, 0xdb, 0x07, 0xf4, 0xb0,
- 0x63, 0x59, 0xdd, 0x0c, 0x31, 0xfc, 0x14, 0x16, 0xb4, 0x9e, 0x41, 0x4d, 0x77, 0xcb, 0x32, 0x5b,
- 0x46, 0x5b, 0x06, 0xc0, 0x77, 0xb3, 0x3b, 0x42, 0xaa, 0xda, 0x8a, 0x08, 0x69, 0x5e, 0x95, 0x8a,
- 0x16, 0xa2, 0x50, 0x1c, 0x53, 0x84, 0x3e, 0x87, 0x59, 0x27, 0x12, 0x02, 0x1f, 0x66, 0xd1, 0x58,
- 0x4f, 0x71, 0xf8, 0xa2, 0xd4, 0x35, 0xeb, 0x79, 0xd8, 0x13, 0x8a, 0x1e, 0xc1, 0x62, 0x8b, 0x18,
- 0xbd, 0x81, 0x43, 0xa5, 0x53, 0xf3, 0xc2, 0x03, 0xdf, 0xe4, 0x11, 0x72, 0x2f, 0x8a, 0x38, 0x19,
- 0xd5, 0xca, 0x31, 0x80, 0x70, 0x6c, 0x9c, 0x39, 0x79, 0x41, 0xc5, 0x73, 0x5d, 0x50, 0x7a, 0x9e,
- 0xcf, 0x7e, 0x3d, 0x79, 0x5e, 0x7a, 0xb5, 0x79, 0xfe, 0x31, 0x94, 0x98, 0xa1, 0xd3, 0xbb, 0xad,
- 0x16, 0xd5, 0x5c, 0x56, 0x99, 0x0b, 0x1d, 0xb6, 0x17, 0x82, 0xb9, 0xc3, 0xc2, 0xcf, 0xad, 0x1e,
- 0x61, 0x0c, 0x47, 0xd9, 0xd0, 0x1d, 0x58, 0xe2, 0x5d, 0xc9, 0x1a, 0xb8, 0x7b, 0x54, 0xb3, 0x4c,
- 0x9d, 0x89, 0xd4, 0x98, 0xf5, 0x4e, 0xf0, 0x69, 0x0c, 0x83, 0x13, 0x94, 0xe8, 0x33, 0x58, 0x0d,
- 0xa2, 0x08, 0xd3, 0xa1, 0x41, 0x9f, 0xee, 0x53, 0x87, 0x7f, 0xb0, 0x4a, 0x61, 0x2d, 0xb7, 0x5e,
- 0x6c, 0xbe, 0x35, 0x1e, 0xd5, 0x56, 0x37, 0xd3, 0x49, 0xf0, 0x34, 0x5e, 0xf4, 0x04, 0x90, 0x43,
- 0x0d, 0x73, 0x68, 0x69, 0x22, 0xfc, 0x64, 0x40, 0x80, 0xb0, 0xef, 0xdd, 0xf1, 0xa8, 0x86, 0xf0,
- 0x04, 0xf6, 0x64, 0x54, 0xfb, 0xc6, 0x24, 0x54, 0x84, 0x47, 0x8a, 0x2c, 0xf4, 0x53, 0x58, 0xee,
- 0xc7, 0x1a, 0x11, 0xab, 0x2c, 0x88, 0x0c, 0xb9, 0x9d, 0x3d, 0x27, 0xe3, 0x9d, 0x2c, 0xec, 0xb9,
- 0x71, 0x38, 0xc3, 0x49, 0x4d, 0xea, 0x5f, 0x15, 0xb8, 0x91, 0xa8, 0x21, 0x5e, 0xba, 0x0e, 0x3c,
- 0x0d, 0xe8, 0x09, 0x14, 0x78, 0x54, 0xe8, 0xc4, 0x25, 0xb2, 0x45, 0xbd, 0x9b, 0x2d, 0x86, 0xbc,
- 0x80, 0xd9, 0xa6, 0x2e, 0x09, 0x5b, 0x64, 0x08, 0xc3, 0x81, 0x54, 0xf4, 0x43, 0x28, 0x48, 0xcd,
- 0xac, 0x32, 0x23, 0x0c, 0xff, 0xce, 0x19, 0x0c, 0x8f, 0x9f, 0xbd, 0x99, 0xe7, 0xaa, 0x70, 0x20,
- 0x50, 0xfd, 0xa7, 0x02, 0x6b, 0x2f, 0xb3, 0xef, 0x91, 0xc1, 0x5c, 0xf4, 0xf9, 0x84, 0x8d, 0xf5,
- 0x8c, 0x79, 0x62, 0x30, 0xcf, 0xc2, 0x60, 0x26, 0xf1, 0x21, 0x11, 0xfb, 0xba, 0x30, 0x6b, 0xb8,
- 0xb4, 0xef, 0x1b, 0x77, 0xef, 0xdc, 0xc6, 0xc5, 0x0e, 0x1e, 0x96, 0xc1, 0x07, 0x5c, 0x38, 0xf6,
- 0x74, 0xa8, 0x2f, 0x14, 0x58, 0x9d, 0xd2, 0xa9, 0xd0, 0x87, 0x61, 0x2f, 0x16, 0x45, 0xa4, 0xa2,
- 0x88, 0xbc, 0x28, 0x47, 0x9b, 0xa8, 0x40, 0xe0, 0x38, 0x1d, 0xfa, 0xa5, 0x02, 0xc8, 0x99, 0x90,
- 0x27, 0x3b, 0xc7, 0xb9, 0xeb, 0xf8, 0x75, 0x69, 0x00, 0x9a, 0xc4, 0xe1, 0x14, 0x75, 0x2a, 0x81,
- 0xe2, 0x2e, 0x71, 0x48, 0xff, 0x13, 0xc3, 0xd4, 0xf9, 0x24, 0x46, 0x6c, 0x43, 0x66, 0xa9, 0xec,
- 0x76, 0x41, 0x98, 0x6d, 0xee, 0x3e, 0x90, 0x18, 0x1c, 0xa1, 0xe2, 0xbd, 0xb1, 0x6b, 0x98, 0xba,
- 0x9c, 0xdb, 0x82, 0xde, 0xc8, 0xe5, 0x61, 0x81, 0x51, 0x7f, 0x3f, 0x03, 0x05, 0xa1, 0x83, 0xcf,
- 0x92, 0xa7, 0xb7, 0xd2, 0x06, 0x14, 0x83, 0xd2, 0x2b, 0xa5, 0x96, 0x25, 0x59, 0x31, 0x28, 0xd3,
- 0x38, 0xa4, 0x41, 0x5f, 0x40, 0x81, 0xf9, 0x05, 0x39, 0x77, 0xfe, 0x82, 0xbc, 0xc0, 0x23, 0x2d,
- 0x28, 0xc5, 0x81, 0x48, 0xe4, 0xc2, 0xaa, 0xcd, 0x4f, 0x4f, 0x5d, 0xea, 0xec, 0x58, 0xee, 0x3d,
- 0x6b, 0x60, 0xea, 0x9b, 0x1a, 0xf7, 0x9e, 0xec, 0x86, 0x77, 0x78, 0x09, 0xdc, 0x4d, 0x27, 0x39,
- 0x19, 0xd5, 0xde, 0x9a, 0x82, 0x12, 0xa5, 0x6b, 0x9a, 0x68, 0xf5, 0x77, 0x0a, 0xac, 0xec, 0x51,
- 0x67, 0x68, 0x68, 0x14, 0xd3, 0x16, 0x75, 0xa8, 0xa9, 0x25, 0x5c, 0xa3, 0x64, 0x70, 0x8d, 0xef,
- 0xed, 0x99, 0xa9, 0xde, 0xbe, 0x01, 0x79, 0x9b, 0xb8, 0x1d, 0x39, 0xd8, 0x17, 0x38, 0x76, 0x97,
- 0xb8, 0x1d, 0x2c, 0xa0, 0x02, 0x6b, 0x39, 0xae, 0x30, 0x74, 0x56, 0x62, 0x2d, 0xc7, 0xc5, 0x02,
- 0xaa, 0xfe, 0x46, 0x81, 0x05, 0x6e, 0xc5, 0x56, 0x87, 0x6a, 0x5d, 0xfe, 0xac, 0xf8, 0x52, 0x01,
- 0x44, 0x93, 0x8f, 0x0d, 0x2f, 0x23, 0x4a, 0x1b, 0x1f, 0x65, 0x4f, 0xd1, 0x89, 0x07, 0x4b, 0x18,
- 0xd6, 0x13, 0x28, 0x86, 0x53, 0x54, 0xaa, 0x7f, 0x99, 0x81, 0x6b, 0xfb, 0xa4, 0x67, 0xe8, 0x22,
- 0xd5, 0x83, 0xfe, 0x24, 0x9b, 0xc3, 0xab, 0x2f, 0xbf, 0x06, 0xe4, 0x99, 0x4d, 0x35, 0x99, 0xcd,
- 0xf7, 0xb3, 0x9b, 0x3e, 0xf5, 0xd0, 0x7b, 0x36, 0xd5, 0xc2, 0x1b, 0xe4, 0x5f, 0x58, 0xa8, 0x40,
- 0x3f, 0x86, 0x39, 0xe6, 0x12, 0x77, 0xc0, 0x64, 0xf0, 0x3f, 0xb8, 0x08, 0x65, 0x42, 0x60, 0x73,
- 0x49, 0xaa, 0x9b, 0xf3, 0xbe, 0xb1, 0x54, 0xa4, 0xfe, 0x47, 0x81, 0xb5, 0xa9, 0xbc, 0x4d, 0xc3,
- 0xd4, 0x79, 0x30, 0xbc, 0x7a, 0x27, 0xdb, 0x31, 0x27, 0xef, 0x5c, 0x80, 0xdd, 0xf2, 0xec, 0xd3,
- 0x7c, 0xad, 0xfe, 0x5b, 0x81, 0x77, 0x4e, 0x63, 0xbe, 0x84, 0xe6, 0x67, 0xc5, 0x9b, 0xdf, 0xc3,
- 0x8b, 0xb3, 0x7c, 0x4a, 0x03, 0xfc, 0x32, 0x77, 0xba, 0xdd, 0xdc, 0x4d, 0xbc, 0x83, 0xd8, 0x02,
- 0xb8, 0x13, 0x16, 0xf9, 0xe0, 0x12, 0x77, 0x03, 0x0c, 0x8e, 0x50, 0x71, 0x5f, 0xd9, 0xb2, 0x3d,
- 0xc8, 0xab, 0xdc, 0xc8, 0x6e, 0x90, 0xdf, 0x58, 0xbc, 0xf2, 0xed, 0x7f, 0xe1, 0x40, 0x22, 0x72,
- 0x61, 0xa9, 0x1f, 0x5b, 0x14, 0xc8, 0x34, 0x39, 0xeb, 0x1c, 0x18, 0xf0, 0x7b, 0x73, 0x73, 0x1c,
- 0x86, 0x13, 0x3a, 0xd0, 0x01, 0x94, 0x87, 0xd2, 0x5f, 0x96, 0xe9, 0x95, 0x74, 0xef, 0x75, 0x5c,
- 0x6c, 0xde, 0xe4, 0xef, 0x8d, 0xfd, 0x24, 0xf2, 0x64, 0x54, 0x5b, 0x49, 0x02, 0xf1, 0xa4, 0x0c,
- 0xf5, 0x1f, 0x0a, 0xbc, 0x3d, 0xf5, 0x26, 0x2e, 0x21, 0xf4, 0x3a, 0xf1, 0xd0, 0xdb, 0xba, 0x88,
- 0xd0, 0x4b, 0x8f, 0xb9, 0xdf, 0xce, 0xbd, 0xc4, 0x52, 0x11, 0x6c, 0x4f, 0xa0, 0x68, 0xfb, 0xb3,
- 0x4b, 0xca, 0xa6, 0x27, 0x4b, 0xe4, 0x70, 0xd6, 0xe6, 0x22, 0xef, 0x9f, 0xc1, 0x27, 0x0e, 0x85,
- 0xa2, 0x9f, 0xc0, 0x8a, 0x3f, 0xdb, 0x73, 0x7e, 0xc3, 0x74, 0xfd, 0x01, 0xed, 0xfc, 0xe1, 0x73,
- 0x75, 0x3c, 0xaa, 0xad, 0x6c, 0x27, 0xa4, 0xe2, 0x09, 0x3d, 0xa8, 0x0b, 0xa5, 0xf0, 0xfa, 0xfd,
- 0xf7, 0xfd, 0xfb, 0x67, 0xf7, 0xb7, 0x65, 0x36, 0xdf, 0x90, 0x0e, 0x2e, 0x85, 0x30, 0x86, 0xa3,
- 0xd2, 0x2f, 0xf8, 0xa1, 0xff, 0x73, 0x58, 0x21, 0xf1, 0x45, 0x27, 0xab, 0xcc, 0x9e, 0xf5, 0x11,
- 0x92, 0x58, 0x95, 0x36, 0x2b, 0xd2, 0x88, 0x95, 0x04, 0x82, 0xe1, 0x09, 0x65, 0x69, 0xaf, 0xbf,
- 0xb9, 0xcb, 0x7a, 0xfd, 0x21, 0x0d, 0x8a, 0x43, 0xe2, 0x18, 0xe4, 0xb0, 0x47, 0xf9, 0x53, 0x3b,
- 0x77, 0xb6, 0x82, 0xb6, 0x2f, 0x59, 0xc3, 0xc9, 0xce, 0x87, 0x30, 0x1c, 0xca, 0x55, 0xff, 0x38,
- 0x03, 0xb5, 0x53, 0xda, 0x37, 0x7a, 0x08, 0xc8, 0x3a, 0x64, 0xd4, 0x19, 0x52, 0xfd, 0xbe, 0xb7,
- 0x8a, 0xf6, 0xc7, 0xfa, 0x5c, 0x38, 0x50, 0x3d, 0x9e, 0xa0, 0xc0, 0x29, 0x5c, 0xa8, 0x07, 0x0b,
- 0x6e, 0x64, 0xd4, 0x93, 0x59, 0xf0, 0x41, 0x76, 0xbb, 0xa2, 0x83, 0x62, 0x73, 0x65, 0x3c, 0xaa,
- 0xc5, 0x46, 0x47, 0x1c, 0x93, 0x8e, 0x34, 0x00, 0x2d, 0xbc, 0x3a, 0x2f, 0xf4, 0x1b, 0xd9, 0xaa,
- 0x58, 0x78, 0x63, 0x41, 0xdf, 0x89, 0x5c, 0x56, 0x44, 0xac, 0x7a, 0x3c, 0x0f, 0xe5, 0xd0, 0x85,
- 0xaf, 0x77, 0x7d, 0xaf, 0x77, 0x7d, 0x2f, 0xdd, 0xf5, 0xc1, 0xeb, 0x5d, 0xdf, 0xb9, 0x76, 0x7d,
- 0x29, 0xb5, 0xb8, 0x74, 0x69, 0x9b, 0xb8, 0x63, 0x05, 0xaa, 0x13, 0x39, 0x7e, 0xd9, 0xbb, 0xb8,
- 0x2f, 0x26, 0x76, 0x71, 0x1f, 0x9d, 0x67, 0x6c, 0x9a, 0xb6, 0x8d, 0xfb, 0x97, 0x02, 0xea, 0xcb,
- 0x6d, 0xbc, 0x84, 0xb9, 0xb0, 0x1f, 0x9f, 0x0b, 0xbf, 0xff, 0x7f, 0x18, 0x98, 0x65, 0x23, 0xf7,
- 0x5f, 0x05, 0x20, 0x1c, 0x66, 0xd0, 0x3b, 0x10, 0xf9, 0xa1, 0x50, 0x96, 0x6e, 0xcf, 0x4d, 0x11,
- 0x38, 0xba, 0x09, 0xf3, 0x7d, 0xca, 0x18, 0x69, 0xfb, 0x0b, 0x91, 0xe0, 0x77, 0xcc, 0x6d, 0x0f,
- 0x8c, 0x7d, 0x3c, 0x3a, 0x80, 0x39, 0x87, 0x12, 0x66, 0x99, 0x72, 0x31, 0xf2, 0x3d, 0xfe, 0x0a,
- 0xc6, 0x02, 0x72, 0x32, 0xaa, 0xdd, 0xca, 0xf2, 0x3b, 0x73, 0x5d, 0x3e, 0x9a, 0x05, 0x13, 0x96,
- 0xe2, 0xd0, 0x7d, 0x28, 0x4b, 0x1d, 0x91, 0x03, 0x7b, 0x95, 0xf6, 0x9a, 0x3c, 0x4d, 0x79, 0x3b,
- 0x49, 0x80, 0x27, 0x79, 0xd4, 0x87, 0x50, 0xf0, 0x07, 0x03, 0x54, 0x81, 0x7c, 0xe4, 0xbd, 0xe5,
- 0x19, 0x2e, 0x20, 0x09, 0xc7, 0xcc, 0xa4, 0x3b, 0x46, 0xfd, 0x83, 0x02, 0x6f, 0xa4, 0x34, 0x25,
- 0x74, 0x0d, 0x72, 0x03, 0xa7, 0x27, 0x5d, 0x30, 0x3f, 0x1e, 0xd5, 0x72, 0x9f, 0xe1, 0x47, 0x98,
- 0xc3, 0x10, 0x81, 0x79, 0xe6, 0xad, 0xa7, 0x64, 0x30, 0xdd, 0xc9, 0x7e, 0xe3, 0xc9, 0xbd, 0x56,
- 0xb3, 0xc4, 0xef, 0xc0, 0x87, 0xfa, 0x72, 0xd1, 0x3a, 0x14, 0x34, 0xd2, 0x1c, 0x98, 0x7a, 0xcf,
- 0xbb, 0xaf, 0x05, 0xef, 0x8d, 0xb7, 0xb5, 0xe9, 0xc1, 0x70, 0x80, 0x6d, 0xee, 0x3c, 0x3f, 0xae,
- 0x5e, 0xf9, 0xea, 0xb8, 0x7a, 0xe5, 0xc5, 0x71, 0xf5, 0xca, 0x2f, 0xc6, 0x55, 0xe5, 0xf9, 0xb8,
- 0xaa, 0x7c, 0x35, 0xae, 0x2a, 0x2f, 0xc6, 0x55, 0xe5, 0x6f, 0xe3, 0xaa, 0xf2, 0xab, 0xbf, 0x57,
- 0xaf, 0xfc, 0x60, 0x3d, 0xeb, 0x7f, 0x39, 0xfc, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x6f, 0xf2, 0xe8,
- 0x4a, 0x10, 0x21, 0x00, 0x00,
+ // 2215 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0x4d, 0x6c, 0x1b, 0xc7,
+ 0x15, 0xf6, 0x92, 0x92, 0x45, 0x3e, 0xca, 0x92, 0x38, 0x71, 0x2a, 0xfa, 0x8f, 0x14, 0x16, 0x41,
+ 0x21, 0x03, 0x2d, 0x59, 0x2b, 0x41, 0xe2, 0x3a, 0x29, 0x02, 0xae, 0x62, 0x3b, 0x76, 0x24, 0x59,
+ 0x18, 0x39, 0x52, 0xd1, 0x26, 0x40, 0x56, 0xcb, 0x21, 0xb9, 0x11, 0xb9, 0xcb, 0xee, 0x2c, 0x65,
+ 0xab, 0x05, 0xda, 0x02, 0x2d, 0x90, 0x1e, 0x0b, 0xf4, 0x52, 0xa0, 0xa7, 0xde, 0x7b, 0x69, 0xef,
+ 0x05, 0x7a, 0xf4, 0x31, 0xb7, 0x1a, 0x28, 0x4a, 0x54, 0x4c, 0xd1, 0x9e, 0x7a, 0x48, 0x81, 0xf6,
+ 0xa0, 0x4b, 0x8b, 0x99, 0x9d, 0xfd, 0xdf, 0x95, 0x56, 0xb2, 0x2c, 0x17, 0x85, 0x6f, 0xda, 0xf7,
+ 0xe6, 0xbd, 0x37, 0xef, 0xcd, 0x9b, 0xf7, 0xbe, 0x79, 0x22, 0xdc, 0xdc, 0xb9, 0x49, 0xeb, 0xba,
+ 0xd9, 0x50, 0x07, 0x7a, 0x43, 0x6d, 0xf5, 0x75, 0x4a, 0x75, 0xd3, 0xb0, 0x48, 0x47, 0xa7, 0xb6,
+ 0xa5, 0xda, 0xba, 0x69, 0x34, 0x76, 0x6f, 0x6c, 0x13, 0x5b, 0xbd, 0xd1, 0xe8, 0x10, 0x83, 0x58,
+ 0xaa, 0x4d, 0x5a, 0xf5, 0x81, 0x65, 0xda, 0x26, 0x5a, 0x74, 0x24, 0xeb, 0xea, 0x40, 0xaf, 0x27,
+ 0x4a, 0xd6, 0x85, 0xe4, 0xe5, 0xaf, 0x77, 0x74, 0xbb, 0x3b, 0xdc, 0xae, 0x6b, 0x66, 0xbf, 0xd1,
+ 0x31, 0x3b, 0x66, 0x83, 0x2b, 0xd8, 0x1e, 0xb6, 0xf9, 0x17, 0xff, 0xe0, 0x7f, 0x39, 0x8a, 0x2f,
+ 0xbf, 0x9e, 0x61, 0x4b, 0xd1, 0xdd, 0x5c, 0x7e, 0xc3, 0x17, 0xea, 0xab, 0x5a, 0x57, 0x37, 0x88,
+ 0xb5, 0xd7, 0x18, 0xec, 0x74, 0x18, 0x81, 0x36, 0xfa, 0xc4, 0x56, 0x93, 0xa4, 0x1a, 0x69, 0x52,
+ 0xd6, 0xd0, 0xb0, 0xf5, 0x3e, 0x89, 0x09, 0xbc, 0x79, 0x94, 0x00, 0xd5, 0xba, 0xa4, 0xaf, 0x46,
+ 0xe5, 0xe4, 0xf7, 0x01, 0x35, 0x07, 0x83, 0xde, 0xde, 0xb2, 0x69, 0xb4, 0xf5, 0xce, 0xd0, 0xf1,
+ 0x03, 0x2d, 0x01, 0x90, 0xc7, 0x03, 0x8b, 0x70, 0x0f, 0x2b, 0xd2, 0x82, 0xb4, 0x58, 0x54, 0xd0,
+ 0x93, 0x51, 0xed, 0xdc, 0x78, 0x54, 0x83, 0xdb, 0x1e, 0x07, 0x07, 0x56, 0xc9, 0x14, 0x66, 0x9b,
+ 0xc3, 0x96, 0x6e, 0x37, 0x0d, 0xc3, 0xb4, 0x1d, 0x35, 0xd7, 0x20, 0xbf, 0x43, 0xf6, 0x84, 0x7c,
+ 0x49, 0xc8, 0xe7, 0x3f, 0x20, 0x7b, 0x98, 0xd1, 0x51, 0x13, 0x66, 0x77, 0xd5, 0xde, 0x90, 0xf8,
+ 0x0a, 0x2b, 0x39, 0xbe, 0x74, 0x5e, 0x2c, 0x9d, 0xdd, 0x0c, 0xb3, 0x71, 0x74, 0xbd, 0xdc, 0x83,
+ 0xb2, 0xff, 0xb5, 0xa5, 0x5a, 0x86, 0x6e, 0x74, 0xd0, 0xd7, 0xa0, 0xd0, 0xd6, 0x49, 0xaf, 0x85,
+ 0x49, 0x5b, 0x28, 0x9c, 0x13, 0x0a, 0x0b, 0x77, 0x04, 0x1d, 0x7b, 0x2b, 0xd0, 0x75, 0x98, 0x7a,
+ 0xe4, 0x08, 0x56, 0xf2, 0x7c, 0xf1, 0xac, 0x58, 0x3c, 0x25, 0xf4, 0x61, 0x97, 0x2f, 0xbf, 0x0b,
+ 0xc5, 0xfb, 0x1b, 0x0f, 0xd6, 0xd6, 0x55, 0x5b, 0xeb, 0x9e, 0x28, 0x46, 0x6d, 0x98, 0x59, 0x65,
+ 0xc2, 0xcb, 0xa6, 0xd1, 0xd2, 0x79, 0x88, 0x16, 0x60, 0xc2, 0x50, 0xfb, 0x44, 0xc8, 0x4f, 0x0b,
+ 0xf9, 0x89, 0x35, 0xb5, 0x4f, 0x30, 0xe7, 0x44, 0xec, 0xe4, 0x32, 0xd9, 0xf9, 0xe3, 0x84, 0x30,
+ 0x84, 0x09, 0x35, 0x87, 0x96, 0x46, 0x28, 0x7a, 0x0c, 0x65, 0xa6, 0x8e, 0x0e, 0x54, 0x8d, 0x6c,
+ 0x90, 0x1e, 0xd1, 0x6c, 0xd3, 0xe2, 0x56, 0x4b, 0x4b, 0xaf, 0xd7, 0xfd, 0x1b, 0xe3, 0x25, 0x4f,
+ 0x7d, 0xb0, 0xd3, 0x61, 0x04, 0x5a, 0x67, 0x39, 0x5a, 0xdf, 0xbd, 0x51, 0x5f, 0x51, 0xb7, 0x49,
+ 0xcf, 0x15, 0x55, 0x5e, 0x1d, 0x8f, 0x6a, 0xe5, 0xb5, 0xa8, 0x46, 0x1c, 0x37, 0x82, 0x4c, 0x98,
+ 0x31, 0xb7, 0x3f, 0x25, 0x9a, 0xed, 0x99, 0xcd, 0x9d, 0xdc, 0x2c, 0x1a, 0x8f, 0x6a, 0x33, 0x0f,
+ 0x42, 0xea, 0x70, 0x44, 0x3d, 0xfa, 0x21, 0x5c, 0xb0, 0x84, 0xdf, 0x78, 0xd8, 0x23, 0xb4, 0x92,
+ 0x5f, 0xc8, 0x2f, 0x96, 0x96, 0x9a, 0xf5, 0xac, 0x85, 0xa1, 0xce, 0xfc, 0x6a, 0x31, 0xd9, 0x2d,
+ 0xdd, 0xee, 0x3e, 0x18, 0x10, 0x87, 0x4d, 0x95, 0x57, 0x45, 0xdc, 0x2f, 0xe0, 0xa0, 0x7e, 0x1c,
+ 0x36, 0x87, 0x7e, 0x21, 0xc1, 0x45, 0xf2, 0x58, 0xeb, 0x0d, 0x5b, 0x24, 0xb4, 0xae, 0x32, 0x71,
+ 0x5a, 0xfb, 0xb8, 0x2a, 0xf6, 0x71, 0xf1, 0x76, 0x82, 0x19, 0x9c, 0x68, 0x1c, 0xbd, 0x07, 0xa5,
+ 0x3e, 0x4b, 0x89, 0x75, 0xb3, 0xa7, 0x6b, 0x7b, 0x95, 0x29, 0x9e, 0x48, 0xf2, 0x78, 0x54, 0x2b,
+ 0xad, 0xfa, 0xe4, 0x83, 0x51, 0x6d, 0x36, 0xf0, 0xf9, 0x70, 0x6f, 0x40, 0x70, 0x50, 0x4c, 0xfe,
+ 0xab, 0x04, 0xf3, 0xab, 0x43, 0x76, 0xbf, 0x8d, 0x4e, 0xd3, 0xdd, 0xbb, 0xc3, 0x43, 0x9f, 0x40,
+ 0x81, 0x1d, 0x5a, 0x4b, 0xb5, 0x55, 0x91, 0x59, 0xdf, 0xc8, 0x76, 0xc4, 0xce, 0x79, 0xae, 0x12,
+ 0x5b, 0xf5, 0x33, 0xdb, 0xa7, 0x61, 0x4f, 0x2b, 0xea, 0xc0, 0x04, 0x1d, 0x10, 0x4d, 0x24, 0xd0,
+ 0xed, 0xec, 0x81, 0x4c, 0xd9, 0xf2, 0xc6, 0x80, 0x68, 0xfe, 0xa5, 0x63, 0x5f, 0x98, 0x1b, 0x90,
+ 0xff, 0x29, 0x41, 0x35, 0x45, 0x46, 0xd1, 0x8d, 0x16, 0xab, 0x32, 0xcf, 0xdf, 0x5b, 0x23, 0xe4,
+ 0xed, 0xca, 0x33, 0x7b, 0x2b, 0x76, 0x9e, 0xea, 0xf4, 0x97, 0x12, 0xc8, 0x87, 0x8b, 0xae, 0xe8,
+ 0xd4, 0x46, 0x1f, 0xc5, 0x1c, 0xaf, 0x67, 0xbc, 0xc9, 0x3a, 0x75, 0xdc, 0xf6, 0xca, 0xb1, 0x4b,
+ 0x09, 0x38, 0xdd, 0x87, 0x49, 0xdd, 0x26, 0x7d, 0x5a, 0xc9, 0xf1, 0xcb, 0xf2, 0xfe, 0x69, 0x79,
+ 0xad, 0x5c, 0x10, 0x46, 0x27, 0xef, 0x31, 0xf5, 0xd8, 0xb1, 0x22, 0xff, 0x26, 0x77, 0x94, 0xcf,
+ 0x2c, 0x40, 0xac, 0x08, 0x0f, 0x38, 0x71, 0xcd, 0x2f, 0xd6, 0xde, 0xe1, 0xad, 0x7b, 0x1c, 0x1c,
+ 0x58, 0xc5, 0xe2, 0x34, 0x50, 0x2d, 0xb5, 0xef, 0xb6, 0xa1, 0xd2, 0xd2, 0x52, 0x76, 0x67, 0xd6,
+ 0x85, 0xa4, 0x32, 0xcd, 0xe2, 0xe4, 0x7e, 0x61, 0x4f, 0x23, 0xb2, 0x61, 0xa6, 0x1f, 0xaa, 0xf0,
+ 0xbc, 0x7b, 0x95, 0x96, 0x6e, 0x1e, 0x23, 0x60, 0x21, 0x79, 0xa7, 0xb4, 0x86, 0x69, 0x38, 0x62,
+ 0x43, 0xfe, 0x42, 0x82, 0x2b, 0x29, 0xe1, 0x3a, 0x83, 0xdc, 0x68, 0x87, 0x73, 0xa3, 0xf9, 0xec,
+ 0xb9, 0x91, 0x9c, 0x14, 0xbf, 0x3a, 0x9f, 0xea, 0x25, 0xcf, 0x86, 0x4f, 0xa0, 0xc8, 0xcf, 0xe1,
+ 0x03, 0xdd, 0x68, 0x25, 0xf4, 0xd0, 0x2c, 0x47, 0xcb, 0x44, 0x95, 0x0b, 0xe3, 0x51, 0xad, 0xe8,
+ 0x7d, 0x62, 0x5f, 0x29, 0xfa, 0x3e, 0xcc, 0xf5, 0x05, 0x50, 0x60, 0xf2, 0xba, 0x61, 0x53, 0x91,
+ 0x43, 0x27, 0x3f, 0xdf, 0x8b, 0xe3, 0x51, 0x6d, 0x6e, 0x35, 0xa2, 0x15, 0xc7, 0xec, 0x20, 0x0d,
+ 0x8a, 0xbb, 0xaa, 0xa5, 0xab, 0xdb, 0x7e, 0xeb, 0x3c, 0x46, 0xe2, 0x6e, 0x0a, 0x51, 0xa5, 0x2c,
+ 0x42, 0x5b, 0x74, 0x29, 0x14, 0xfb, 0x7a, 0x99, 0x91, 0xfe, 0xd0, 0x81, 0x89, 0x6e, 0x5f, 0x5c,
+ 0x3a, 0xee, 0x71, 0x9a, 0x86, 0x6f, 0xc4, 0xa5, 0x50, 0xec, 0xeb, 0x45, 0x2b, 0x70, 0xa1, 0xad,
+ 0xea, 0xbd, 0xa1, 0x45, 0x44, 0xd3, 0x9b, 0xe4, 0x17, 0xf7, 0xab, 0xac, 0x83, 0xdf, 0x09, 0x32,
+ 0x0e, 0x46, 0xb5, 0x72, 0x88, 0xc0, 0x1b, 0x5f, 0x58, 0x18, 0xfd, 0x00, 0x66, 0xfb, 0x21, 0xf0,
+ 0x46, 0x2b, 0xe7, 0xf9, 0xc6, 0x8f, 0x7b, 0x24, 0x9e, 0x02, 0x1f, 0xe8, 0x86, 0xe9, 0x14, 0x47,
+ 0x2d, 0xa1, 0x9f, 0x49, 0x80, 0x2c, 0xa2, 0x1b, 0xbb, 0xa6, 0xc6, 0x35, 0x86, 0xba, 0xf8, 0xb7,
+ 0x85, 0x1a, 0x84, 0x63, 0x2b, 0x0e, 0x46, 0xb5, 0x5b, 0x19, 0x9e, 0x2d, 0xf5, 0xb8, 0x24, 0x0f,
+ 0x41, 0x82, 0x4d, 0xf9, 0x6f, 0x05, 0x98, 0x75, 0x6f, 0xc7, 0x16, 0xd9, 0xee, 0x9a, 0xe6, 0x4e,
+ 0x06, 0x18, 0xfb, 0x08, 0xa6, 0xb5, 0x9e, 0x4e, 0x0c, 0xdb, 0x79, 0x69, 0x88, 0x6c, 0xfe, 0x56,
+ 0xf6, 0xd0, 0x09, 0x53, 0xcb, 0x01, 0x25, 0xca, 0x45, 0x61, 0x68, 0x3a, 0x48, 0xc5, 0x21, 0x43,
+ 0xe8, 0x23, 0x98, 0xb4, 0x02, 0x28, 0xf0, 0xad, 0x2c, 0x16, 0xeb, 0x09, 0x98, 0xcb, 0x2b, 0x15,
+ 0x0e, 0xc8, 0x72, 0x94, 0xc6, 0x53, 0x6c, 0xe2, 0x59, 0x52, 0x2c, 0x82, 0xd1, 0x8a, 0x27, 0xc2,
+ 0x68, 0xc9, 0x50, 0x7f, 0xf2, 0xc5, 0x40, 0xfd, 0xd2, 0xf3, 0x85, 0xfa, 0xef, 0x41, 0x89, 0xea,
+ 0x2d, 0x72, 0xbb, 0xdd, 0x26, 0x9a, 0xcd, 0xee, 0xa3, 0x17, 0xb0, 0x0d, 0x9f, 0xcc, 0x02, 0xe6,
+ 0x7f, 0x2e, 0xf7, 0x54, 0x4a, 0x71, 0x50, 0x0c, 0xdd, 0x82, 0x19, 0xf6, 0x46, 0x36, 0x87, 0xf6,
+ 0x06, 0xd1, 0x4c, 0xa3, 0x45, 0xf9, 0xbd, 0x9a, 0x74, 0x76, 0xf0, 0x30, 0xc4, 0xc1, 0x91, 0x95,
+ 0xe8, 0x43, 0x98, 0xf7, 0xb2, 0x08, 0x93, 0x5d, 0x9d, 0x3c, 0xda, 0x24, 0x16, 0xe5, 0xd5, 0xa1,
+ 0xb0, 0x90, 0x5f, 0x2c, 0x2a, 0x57, 0xc6, 0xa3, 0xda, 0x7c, 0x33, 0x79, 0x09, 0x4e, 0x93, 0x45,
+ 0x3f, 0x4d, 0xbe, 0xef, 0xc0, 0x1d, 0x7c, 0x78, 0x56, 0x77, 0x3d, 0xa9, 0xe6, 0x4d, 0x9f, 0x55,
+ 0xcd, 0x93, 0xff, 0x2c, 0xc1, 0xd5, 0x48, 0xa1, 0x09, 0x8f, 0x29, 0x9e, 0x3f, 0x04, 0xff, 0x2e,
+ 0x14, 0x84, 0x65, 0x17, 0x74, 0x7c, 0xf3, 0xf8, 0xa0, 0x43, 0x68, 0x50, 0x26, 0x98, 0x29, 0xec,
+ 0x29, 0x94, 0xff, 0x21, 0xc1, 0xc2, 0x61, 0xfe, 0x9d, 0x01, 0xa2, 0xda, 0x09, 0x23, 0xaa, 0x3b,
+ 0x27, 0x76, 0x2e, 0xb4, 0xf1, 0x14, 0x58, 0xf5, 0xdb, 0x1c, 0x14, 0xdc, 0x3e, 0x8d, 0xde, 0x61,
+ 0x18, 0xca, 0xd6, 0xba, 0x2c, 0xf5, 0xc4, 0x54, 0xa3, 0xea, 0x36, 0xf3, 0x75, 0x97, 0x71, 0x10,
+ 0xfc, 0xc0, 0xbe, 0x00, 0xbf, 0x1e, 0x6a, 0x6c, 0x6e, 0x25, 0x20, 0xf0, 0x3b, 0xd9, 0xbd, 0x88,
+ 0xcf, 0xbe, 0x94, 0xaf, 0xb0, 0xcb, 0x15, 0xa7, 0xe3, 0x04, 0x7b, 0x0c, 0x08, 0x7e, 0x4a, 0x4d,
+ 0x83, 0x6f, 0x91, 0x57, 0xfe, 0x63, 0x01, 0x41, 0x6f, 0x96, 0xe4, 0x00, 0x41, 0xef, 0x13, 0xfb,
+ 0x4a, 0xe5, 0xa7, 0x12, 0xcc, 0xa7, 0x4c, 0x01, 0xd0, 0x5b, 0xfe, 0x9c, 0x83, 0x57, 0xe7, 0x8a,
+ 0xc4, 0x0b, 0x4e, 0x39, 0x38, 0xa0, 0xe0, 0x0c, 0x1c, 0x5e, 0x87, 0x7e, 0xc2, 0x8a, 0x4b, 0x4c,
+ 0x9f, 0x68, 0xc9, 0x27, 0x6e, 0x90, 0x97, 0x3d, 0x14, 0x12, 0xe3, 0xe1, 0x04, 0x73, 0xb2, 0x0a,
+ 0x3e, 0xf6, 0x65, 0x0f, 0x2c, 0x75, 0xa0, 0x8b, 0xf2, 0x17, 0x7d, 0x60, 0x35, 0xd7, 0xef, 0x09,
+ 0x0e, 0x0e, 0xac, 0x62, 0xa0, 0x63, 0x87, 0x21, 0xf0, 0x5c, 0x18, 0x74, 0x70, 0x2c, 0xcd, 0x39,
+ 0xf2, 0xef, 0x72, 0xe0, 0xbd, 0x9d, 0x32, 0x60, 0x94, 0x06, 0x14, 0xbd, 0x9e, 0x26, 0xb4, 0x7a,
+ 0x00, 0xd3, 0xeb, 0x7f, 0xd8, 0x5f, 0x83, 0x3e, 0x86, 0x02, 0x75, 0x3b, 0x5d, 0xfe, 0xe4, 0x9d,
+ 0x8e, 0xbf, 0xf1, 0xbc, 0x1e, 0xe7, 0xa9, 0x44, 0x36, 0xcc, 0xf3, 0x27, 0x01, 0xb1, 0x89, 0xb5,
+ 0x66, 0xda, 0x77, 0xcc, 0xa1, 0xd1, 0x6a, 0x6a, 0x3c, 0xd3, 0x1d, 0x98, 0x71, 0x8b, 0xf5, 0x96,
+ 0xf5, 0xe4, 0x25, 0x07, 0xa3, 0xda, 0x95, 0x14, 0x16, 0xbf, 0x4d, 0x69, 0xaa, 0xe5, 0x5f, 0x4b,
+ 0x30, 0xb7, 0x41, 0xac, 0x5d, 0x5d, 0x23, 0x98, 0xb4, 0x89, 0x45, 0x0c, 0x2d, 0x12, 0x1a, 0x29,
+ 0x43, 0x68, 0xdc, 0x68, 0xe7, 0x52, 0xa3, 0x7d, 0x15, 0x26, 0x06, 0xaa, 0xdd, 0x15, 0x53, 0xd7,
+ 0x02, 0xe3, 0xae, 0xab, 0x76, 0x17, 0x73, 0x2a, 0xe7, 0x9a, 0x96, 0xcd, 0x1d, 0x9d, 0x14, 0x5c,
+ 0xd3, 0xb2, 0x31, 0xa7, 0xca, 0xbf, 0x94, 0x60, 0x9a, 0x79, 0xb1, 0xdc, 0x25, 0xda, 0x8e, 0x6e,
+ 0x74, 0xd0, 0x67, 0x12, 0x20, 0x12, 0x9d, 0x04, 0x3b, 0x37, 0xa2, 0xb4, 0xf4, 0x76, 0xf6, 0x3b,
+ 0x19, 0x9b, 0x26, 0xfb, 0x69, 0x1d, 0x63, 0x51, 0x9c, 0x60, 0x52, 0xfe, 0x53, 0x0e, 0x2e, 0x6d,
+ 0xaa, 0x3d, 0xbd, 0xf5, 0x82, 0x66, 0x64, 0x7a, 0x68, 0x6a, 0x74, 0xf7, 0x38, 0x2f, 0xb7, 0x94,
+ 0x4d, 0xa7, 0x0d, 0x8c, 0xd0, 0xf7, 0xe0, 0x3c, 0xb5, 0x55, 0x7b, 0xe8, 0xce, 0x1e, 0xee, 0x9d,
+ 0x86, 0x31, 0xae, 0x50, 0x99, 0x11, 0xe6, 0xce, 0x3b, 0xdf, 0x58, 0x18, 0x92, 0xff, 0x2d, 0xc1,
+ 0x42, 0xaa, 0xec, 0xd9, 0x8d, 0xe6, 0x06, 0xa1, 0x20, 0xaf, 0x9d, 0x82, 0xdf, 0x47, 0x0d, 0xe7,
+ 0xfe, 0x25, 0xc1, 0x6b, 0x47, 0x09, 0x9f, 0x01, 0x60, 0x30, 0xc3, 0x80, 0xe1, 0xfe, 0xe9, 0x79,
+ 0x9e, 0x02, 0x1a, 0x3e, 0xcb, 0x1f, 0xed, 0xf7, 0xcb, 0x11, 0x5d, 0xe0, 0x1f, 0x3d, 0x5b, 0x50,
+ 0xde, 0x15, 0xf1, 0x32, 0x0d, 0xa7, 0xa4, 0x3b, 0x13, 0x96, 0xa2, 0x72, 0x9d, 0x3d, 0xe4, 0x36,
+ 0xa3, 0xcc, 0x83, 0x51, 0x6d, 0x2e, 0x4a, 0xc4, 0x71, 0x1d, 0xf2, 0xdf, 0x25, 0xb8, 0x96, 0x7a,
+ 0x12, 0x67, 0x90, 0x7a, 0xdd, 0x70, 0xea, 0x2d, 0x9f, 0x46, 0xea, 0xa5, 0xce, 0xff, 0xae, 0x1d,
+ 0x5a, 0x0d, 0xff, 0xcf, 0x27, 0x80, 0x3b, 0x50, 0xf2, 0x8f, 0xdf, 0x1d, 0x9c, 0xbc, 0x71, 0xfc,
+ 0x78, 0x9b, 0x86, 0xf2, 0x8a, 0x08, 0x70, 0xc9, 0xa7, 0x51, 0x1c, 0xd4, 0x7e, 0xca, 0x13, 0x94,
+ 0x1f, 0xc1, 0x9c, 0x1a, 0xfe, 0x2f, 0x34, 0xad, 0x4c, 0x1e, 0xf7, 0xe1, 0x16, 0xf9, 0x3f, 0xb6,
+ 0x52, 0x11, 0x4e, 0xcc, 0x45, 0x18, 0x14, 0xc7, 0x8c, 0xbd, 0xd8, 0x29, 0x61, 0x68, 0x74, 0x3b,
+ 0xf5, 0x7c, 0x46, 0xb7, 0xf2, 0x1f, 0x72, 0x50, 0x3b, 0xa2, 0x7d, 0xa3, 0xfb, 0x80, 0xcc, 0x6d,
+ 0x4a, 0xac, 0x5d, 0xd2, 0xba, 0xeb, 0xfc, 0xe2, 0xc0, 0x85, 0xf5, 0x79, 0x1f, 0x50, 0x3d, 0x88,
+ 0xad, 0xc0, 0x09, 0x52, 0xa8, 0x07, 0xd3, 0x76, 0x00, 0xea, 0x89, 0x5b, 0xf0, 0x66, 0x76, 0xbf,
+ 0x82, 0x40, 0x51, 0x99, 0x1b, 0x8f, 0x6a, 0x21, 0xe8, 0x88, 0x43, 0xda, 0x91, 0x06, 0xa0, 0xf9,
+ 0x47, 0xe7, 0xa4, 0x7e, 0x23, 0x5b, 0x15, 0xf3, 0x4f, 0xcc, 0xeb, 0x3b, 0x81, 0xc3, 0x0a, 0xa8,
+ 0x95, 0xf7, 0xa7, 0xa0, 0xec, 0x87, 0xf0, 0xe5, 0x10, 0xf5, 0xe5, 0x10, 0xf5, 0xd0, 0x21, 0x2a,
+ 0xbc, 0x1c, 0xa2, 0x9e, 0x68, 0x88, 0x9a, 0x50, 0x8b, 0x4b, 0x67, 0x36, 0xbd, 0xdc, 0x97, 0xa0,
+ 0x1a, 0xbb, 0xe3, 0x67, 0x3d, 0xbf, 0xfc, 0x38, 0x36, 0xbf, 0x7c, 0xfb, 0x24, 0xb0, 0x29, 0x6d,
+ 0x82, 0xf9, 0xa5, 0x04, 0xf2, 0xe1, 0x3e, 0xfe, 0x4f, 0xff, 0x62, 0xe0, 0xf0, 0xad, 0xa7, 0x80,
+ 0xc3, 0xff, 0x48, 0x00, 0x3e, 0x98, 0x41, 0xaf, 0x41, 0xe0, 0x47, 0x58, 0xa2, 0x74, 0x3b, 0x61,
+ 0x0a, 0xd0, 0xd1, 0x75, 0x98, 0xea, 0x13, 0x4a, 0xd5, 0x8e, 0x3b, 0x10, 0xf1, 0x7e, 0x64, 0xb6,
+ 0xea, 0x90, 0xb1, 0xcb, 0x47, 0x5b, 0x70, 0xde, 0x22, 0x2a, 0x15, 0xd3, 0xcc, 0xa2, 0xf2, 0x2e,
+ 0x7b, 0x05, 0x63, 0x4e, 0x39, 0x18, 0xd5, 0x6e, 0x64, 0xf9, 0x39, 0x61, 0x5d, 0x3c, 0x9a, 0xb9,
+ 0x10, 0x16, 0xea, 0xd0, 0x5d, 0x28, 0x0b, 0x1b, 0x81, 0x0d, 0x3b, 0x95, 0xf6, 0x92, 0xd8, 0x4d,
+ 0x79, 0x35, 0xba, 0x00, 0xc7, 0x65, 0xe4, 0xfb, 0x50, 0x70, 0x81, 0x01, 0xaa, 0xc0, 0x44, 0xe0,
+ 0xbd, 0xe5, 0x38, 0xce, 0x29, 0x91, 0xc0, 0xe4, 0x92, 0x03, 0x23, 0xff, 0x5e, 0x82, 0x57, 0x12,
+ 0x9a, 0x12, 0xba, 0x04, 0xf9, 0xa1, 0xd5, 0x13, 0x21, 0x98, 0x1a, 0x8f, 0x6a, 0xf9, 0x0f, 0xf1,
+ 0x0a, 0x66, 0x34, 0xa4, 0xc2, 0x14, 0x75, 0xc6, 0x53, 0x22, 0x99, 0x6e, 0x65, 0x3f, 0xf1, 0xe8,
+ 0x5c, 0x4b, 0x29, 0xb1, 0x33, 0x70, 0xa9, 0xae, 0x5e, 0xb4, 0x08, 0x05, 0x4d, 0x55, 0x86, 0x46,
+ 0xab, 0xe7, 0x9c, 0xd7, 0xb4, 0xf3, 0xc6, 0x5b, 0x6e, 0x3a, 0x34, 0xec, 0x71, 0x95, 0xb5, 0x27,
+ 0xfb, 0xd5, 0x73, 0x9f, 0xef, 0x57, 0xcf, 0x3d, 0xdd, 0xaf, 0x9e, 0xfb, 0xf1, 0xb8, 0x2a, 0x3d,
+ 0x19, 0x57, 0xa5, 0xcf, 0xc7, 0x55, 0xe9, 0xe9, 0xb8, 0x2a, 0xfd, 0x65, 0x5c, 0x95, 0x7e, 0xfe,
+ 0x45, 0xf5, 0xdc, 0x77, 0x16, 0xb3, 0xfe, 0x98, 0xf5, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x13,
+ 0x7c, 0x49, 0xa4, 0xf7, 0x2a, 0x00, 0x00,
+}
+
+func (m *ApplyConfiguration) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ApplyConfiguration) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ApplyConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Expression)
+ copy(dAtA[i:], m.Expression)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
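
Aside on the generated marshalers being added here (an illustration, not part of the vendored file): the new ApplyConfiguration and JSONPatch messages follow the usual gogo/protobuf shape — `Size()` computes the exact length, and `MarshalToSizedBuffer` fills the buffer back-to-front: payload first, then its varint length, then the tag byte (`field_number<<3 | wire_type`), which is why a length-delimited field 1 ends with `dAtA[i] = 0xa`. The sketch below reproduces that back-to-front technique with hypothetical helpers (`putVarintBackward`, `marshalExpression`, `varintLen`); none of these names come from the generated code.

```go
// Hedged sketch: encode a message with one string field (number 1) into a
// pre-sized buffer from the end, the same shape as the generated marshalers.
package main

import "fmt"

// putVarintBackward writes v as a protobuf varint ending just before offset
// and returns the new offset.
func putVarintBackward(buf []byte, offset int, v uint64) int {
	var tmp [10]byte
	n := 0
	for {
		b := byte(v & 0x7f)
		v >>= 7
		if v != 0 {
			b |= 0x80
		}
		tmp[n] = b
		n++
		if v == 0 {
			break
		}
	}
	offset -= n
	copy(buf[offset:], tmp[:n])
	return offset
}

func varintLen(v uint64) int {
	n := 1
	for v >= 0x80 {
		v >>= 7
		n++
	}
	return n
}

// marshalExpression encodes {Expression: expr} with Expression as field 1.
func marshalExpression(expr string) []byte {
	size := 1 + varintLen(uint64(len(expr))) + len(expr) // tag + len + payload
	buf := make([]byte, size)
	i := size
	i -= len(expr)
	copy(buf[i:], expr) // payload, written first (rightmost)
	i = putVarintBackward(buf, i, uint64(len(expr)))
	i--
	buf[i] = 0xa // field 1, wire type 2 (length-delimited)
	return buf[i:]
}

func main() {
	fmt.Printf("%x\n", marshalExpression("object")) // 0a066f626a656374
}
```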
func (m *AuditAnnotation) Marshal() (dAtA []byte, err error) {
@@ -971,6 +1277,34 @@ func (m *ExpressionWarning) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *JSONPatch) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *JSONPatch) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *JSONPatch) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Expression)
+ copy(dAtA[i:], m.Expression)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
func (m *MatchCondition) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -1086,7 +1420,7 @@ func (m *MatchResources) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
-func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) {
+func (m *MutatingAdmissionPolicy) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1096,112 +1430,18 @@ func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *MutatingWebhook) MarshalTo(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicy) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.MatchConditions) > 0 {
- for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x62
- }
- }
- if m.ObjectSelector != nil {
- {
- size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x5a
- }
- if m.ReinvocationPolicy != nil {
- i -= len(*m.ReinvocationPolicy)
- copy(dAtA[i:], *m.ReinvocationPolicy)
- i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReinvocationPolicy)))
- i--
- dAtA[i] = 0x52
- }
- if m.MatchPolicy != nil {
- i -= len(*m.MatchPolicy)
- copy(dAtA[i:], *m.MatchPolicy)
- i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy)))
- i--
- dAtA[i] = 0x4a
- }
- if len(m.AdmissionReviewVersions) > 0 {
- for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.AdmissionReviewVersions[iNdEx])
- copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx])
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx])))
- i--
- dAtA[i] = 0x42
- }
- }
- if m.TimeoutSeconds != nil {
- i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds))
- i--
- dAtA[i] = 0x38
- }
- if m.SideEffects != nil {
- i -= len(*m.SideEffects)
- copy(dAtA[i:], *m.SideEffects)
- i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects)))
- i--
- dAtA[i] = 0x32
- }
- if m.NamespaceSelector != nil {
- {
- size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x2a
- }
- if m.FailurePolicy != nil {
- i -= len(*m.FailurePolicy)
- copy(dAtA[i:], *m.FailurePolicy)
- i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy)))
- i--
- dAtA[i] = 0x22
- }
- if len(m.Rules) > 0 {
- for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- }
{
- size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i])
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
@@ -1210,15 +1450,20 @@ func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) {
}
i--
dAtA[i] = 0x12
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
-func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) {
+func (m *MutatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1228,30 +1473,26 @@ func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *MutatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicyBinding) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *MutatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Webhooks) > 0 {
- for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
}
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
+ i--
+ dAtA[i] = 0x12
{
size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
@@ -1265,7 +1506,7 @@ func (m *MutatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, e
return len(dAtA) - i, nil
}
-func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) {
+func (m *MutatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1275,12 +1516,12 @@ func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *MutatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicyBindingList) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *MutatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
@@ -1312,7 +1553,7 @@ func (m *MutatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (in
return len(dAtA) - i, nil
}
-func (m *NamedRuleWithOperations) Marshal() (dAtA []byte, err error) {
+func (m *MutatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1322,39 +1563,49 @@ func (m *NamedRuleWithOperations) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *NamedRuleWithOperations) MarshalTo(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicyBindingSpec) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *NamedRuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- {
- size, err := m.RuleWithOperations.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
+ if m.MatchResources != nil {
+ {
+ size, err := m.MatchResources.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
}
- i--
- dAtA[i] = 0x12
- if len(m.ResourceNames) > 0 {
- for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.ResourceNames[iNdEx])
- copy(dAtA[i:], m.ResourceNames[iNdEx])
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx])))
- i--
- dAtA[i] = 0xa
+ if m.ParamRef != nil {
+ {
+ size, err := m.ParamRef.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
+ i--
+ dAtA[i] = 0x12
}
+ i -= len(m.PolicyName)
+ copy(dAtA[i:], m.PolicyName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyName)))
+ i--
+ dAtA[i] = 0xa
return len(dAtA) - i, nil
}
-func (m *ParamKind) Marshal() (dAtA []byte, err error) {
+func (m *MutatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1364,30 +1615,44 @@ func (m *ParamKind) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *ParamKind) MarshalTo(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicyList) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ParamKind) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- i -= len(m.Kind)
- copy(dAtA[i:], m.Kind)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
- i--
- dAtA[i] = 0x12
- i -= len(m.APIVersion)
- copy(dAtA[i:], m.APIVersion)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion)))
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
-func (m *ParamRef) Marshal() (dAtA []byte, err error) {
+func (m *MutatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1397,26 +1662,73 @@ func (m *ParamRef) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *ParamRef) MarshalTo(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicySpec) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ParamRef) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- if m.ParameterNotFoundAction != nil {
- i -= len(*m.ParameterNotFoundAction)
- copy(dAtA[i:], *m.ParameterNotFoundAction)
- i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ParameterNotFoundAction)))
+ i -= len(m.ReinvocationPolicy)
+ copy(dAtA[i:], m.ReinvocationPolicy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReinvocationPolicy)))
+ i--
+ dAtA[i] = 0x3a
+ if len(m.MatchConditions) > 0 {
+ for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ }
+ if m.FailurePolicy != nil {
+ i -= len(*m.FailurePolicy)
+ copy(dAtA[i:], *m.FailurePolicy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy)))
i--
- dAtA[i] = 0x22
+ dAtA[i] = 0x2a
}
- if m.Selector != nil {
+ if len(m.Mutations) > 0 {
+ for iNdEx := len(m.Mutations) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Mutations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if len(m.Variables) > 0 {
+ for iNdEx := len(m.Variables) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Variables[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if m.MatchConstraints != nil {
{
- size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i])
+ size, err := m.MatchConstraints.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
@@ -1424,67 +1736,24 @@ func (m *ParamRef) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
- dAtA[i] = 0x1a
- }
- i -= len(m.Namespace)
- copy(dAtA[i:], m.Namespace)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
- i--
- dAtA[i] = 0x12
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *ServiceReference) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Port != nil {
- i = encodeVarintGenerated(dAtA, i, uint64(*m.Port))
- i--
- dAtA[i] = 0x20
+ dAtA[i] = 0x12
}
- if m.Path != nil {
- i -= len(*m.Path)
- copy(dAtA[i:], *m.Path)
- i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path)))
+ if m.ParamKind != nil {
+ {
+ size, err := m.ParamKind.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
i--
- dAtA[i] = 0x1a
+ dAtA[i] = 0xa
}
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0x12
- i -= len(m.Namespace)
- copy(dAtA[i:], m.Namespace)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
- i--
- dAtA[i] = 0xa
return len(dAtA) - i, nil
}
-func (m *TypeChecking) Marshal() (dAtA []byte, err error) {
+func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1494,20 +1763,20 @@ func (m *TypeChecking) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *TypeChecking) MarshalTo(dAtA []byte) (int, error) {
+func (m *MutatingWebhook) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *TypeChecking) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ExpressionWarnings) > 0 {
- for iNdEx := len(m.ExpressionWarnings) - 1; iNdEx >= 0; iNdEx-- {
+ if len(m.MatchConditions) > 0 {
+ for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- {
{
- size, err := m.ExpressionWarnings[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
@@ -1515,54 +1784,91 @@ func (m *TypeChecking) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
- dAtA[i] = 0xa
+ dAtA[i] = 0x62
}
}
- return len(dAtA) - i, nil
-}
-
-func (m *ValidatingAdmissionPolicy) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
+ if m.ObjectSelector != nil {
+ {
+ size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x5a
}
- return dAtA[:n], nil
-}
-
-func (m *ValidatingAdmissionPolicy) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ValidatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
+ if m.ReinvocationPolicy != nil {
+ i -= len(*m.ReinvocationPolicy)
+ copy(dAtA[i:], *m.ReinvocationPolicy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReinvocationPolicy)))
+ i--
+ dAtA[i] = 0x52
+ }
+ if m.MatchPolicy != nil {
+ i -= len(*m.MatchPolicy)
+ copy(dAtA[i:], *m.MatchPolicy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy)))
+ i--
+ dAtA[i] = 0x4a
+ }
+ if len(m.AdmissionReviewVersions) > 0 {
+ for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.AdmissionReviewVersions[iNdEx])
+ copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx])))
+ i--
+ dAtA[i] = 0x42
}
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- i--
- dAtA[i] = 0x1a
- {
- size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
+ if m.TimeoutSeconds != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds))
+ i--
+ dAtA[i] = 0x38
+ }
+ if m.SideEffects != nil {
+ i -= len(*m.SideEffects)
+ copy(dAtA[i:], *m.SideEffects)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects)))
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.NamespaceSelector != nil {
+ {
+ size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.FailurePolicy != nil {
+ i -= len(*m.FailurePolicy)
+ copy(dAtA[i:], *m.FailurePolicy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.Rules) > 0 {
+ for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
}
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- i--
- dAtA[i] = 0x12
{
- size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
@@ -1570,11 +1876,16 @@ func (m *ValidatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, erro
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
+ dAtA[i] = 0x12
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
-func (m *ValidatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err error) {
+func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1584,26 +1895,30 @@ func (m *ValidatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *ValidatingAdmissionPolicyBinding) MarshalTo(dAtA []byte) (int, error) {
+func (m *MutatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ValidatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *MutatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- {
- size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
+ if len(m.Webhooks) > 0 {
+ for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
}
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- i--
- dAtA[i] = 0x12
{
size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
@@ -1617,7 +1932,7 @@ func (m *ValidatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (in
return len(dAtA) - i, nil
}
-func (m *ValidatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error) {
+func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1627,12 +1942,12 @@ func (m *ValidatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error
return dAtA[:n], nil
}
-func (m *ValidatingAdmissionPolicyBindingList) MarshalTo(dAtA []byte) (int, error) {
+func (m *MutatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ValidatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *MutatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
@@ -1664,7 +1979,7 @@ func (m *ValidatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte)
return len(dAtA) - i, nil
}
-func (m *ValidatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error) {
+func (m *Mutation) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1674,28 +1989,19 @@ func (m *ValidatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error
return dAtA[:n], nil
}
-func (m *ValidatingAdmissionPolicyBindingSpec) MarshalTo(dAtA []byte) (int, error) {
+func (m *Mutation) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *Mutation) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ValidationActions) > 0 {
- for iNdEx := len(m.ValidationActions) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.ValidationActions[iNdEx])
- copy(dAtA[i:], m.ValidationActions[iNdEx])
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValidationActions[iNdEx])))
- i--
- dAtA[i] = 0x22
- }
- }
- if m.MatchResources != nil {
+ if m.JSONPatch != nil {
{
- size, err := m.MatchResources.MarshalToSizedBuffer(dAtA[:i])
+ size, err := m.JSONPatch.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
@@ -1703,11 +2009,11 @@ func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte)
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
- dAtA[i] = 0x1a
+ dAtA[i] = 0x22
}
- if m.ParamRef != nil {
+ if m.ApplyConfiguration != nil {
{
- size, err := m.ParamRef.MarshalToSizedBuffer(dAtA[:i])
+ size, err := m.ApplyConfiguration.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
@@ -1715,17 +2021,17 @@ func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte)
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
- dAtA[i] = 0x12
+ dAtA[i] = 0x1a
}
- i -= len(m.PolicyName)
- copy(dAtA[i:], m.PolicyName)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyName)))
+ i -= len(m.PatchType)
+ copy(dAtA[i:], m.PatchType)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.PatchType)))
i--
- dAtA[i] = 0xa
+ dAtA[i] = 0x12
return len(dAtA) - i, nil
}
-func (m *ValidatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) {
+func (m *NamedRuleWithOperations) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1735,32 +2041,18 @@ func (m *ValidatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *ValidatingAdmissionPolicyList) MarshalTo(dAtA []byte) (int, error) {
+func (m *NamedRuleWithOperations) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ValidatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *NamedRuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Items) > 0 {
- for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
{
- size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ size, err := m.RuleWithOperations.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
@@ -1768,11 +2060,20 @@ func (m *ValidatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int,
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
- dAtA[i] = 0xa
+ dAtA[i] = 0x12
+ if len(m.ResourceNames) > 0 {
+ for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.ResourceNames[iNdEx])
+ copy(dAtA[i:], m.ResourceNames[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
return len(dAtA) - i, nil
}
-func (m *ValidatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) {
+func (m *ParamKind) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1782,94 +2083,59 @@ func (m *ValidatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *ValidatingAdmissionPolicySpec) MarshalTo(dAtA []byte) (int, error) {
+func (m *ParamKind) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *ParamKind) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Variables) > 0 {
- for iNdEx := len(m.Variables) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Variables[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3a
- }
- }
- if len(m.MatchConditions) > 0 {
- for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x32
- }
- }
- if len(m.AuditAnnotations) > 0 {
- for iNdEx := len(m.AuditAnnotations) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.AuditAnnotations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x2a
- }
+ i -= len(m.Kind)
+ copy(dAtA[i:], m.Kind)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.APIVersion)
+ copy(dAtA[i:], m.APIVersion)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ParamRef) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- if m.FailurePolicy != nil {
- i -= len(*m.FailurePolicy)
- copy(dAtA[i:], *m.FailurePolicy)
- i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy)))
+ return dAtA[:n], nil
+}
+
+func (m *ParamRef) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ParamRef) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ParameterNotFoundAction != nil {
+ i -= len(*m.ParameterNotFoundAction)
+ copy(dAtA[i:], *m.ParameterNotFoundAction)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ParameterNotFoundAction)))
i--
dAtA[i] = 0x22
}
- if len(m.Validations) > 0 {
- for iNdEx := len(m.Validations) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Validations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- }
- if m.MatchConstraints != nil {
- {
- size, err := m.MatchConstraints.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- if m.ParamKind != nil {
+ if m.Selector != nil {
{
- size, err := m.ParamKind.MarshalToSizedBuffer(dAtA[:i])
+ size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
@@ -1877,12 +2143,22 @@ func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int,
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
- dAtA[i] = 0xa
+ dAtA[i] = 0x1a
}
+ i -= len(m.Namespace)
+ copy(dAtA[i:], m.Namespace)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
return len(dAtA) - i, nil
}
-func (m *ValidatingAdmissionPolicyStatus) Marshal() (dAtA []byte, err error) {
+func (m *ServiceReference) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1892,49 +2168,42 @@ func (m *ValidatingAdmissionPolicyStatus) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *ValidatingAdmissionPolicyStatus) MarshalTo(dAtA []byte) (int, error) {
+func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ValidatingAdmissionPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Conditions) > 0 {
- for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
+ if m.Port != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.Port))
+ i--
+ dAtA[i] = 0x20
}
- if m.TypeChecking != nil {
- {
- size, err := m.TypeChecking.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
+ if m.Path != nil {
+ i -= len(*m.Path)
+ copy(dAtA[i:], *m.Path)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path)))
i--
- dAtA[i] = 0x12
+ dAtA[i] = 0x1a
}
- i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
i--
- dAtA[i] = 0x8
+ dAtA[i] = 0x12
+ i -= len(m.Namespace)
+ copy(dAtA[i:], m.Namespace)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+ i--
+ dAtA[i] = 0xa
return len(dAtA) - i, nil
}
-func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) {
+func (m *TypeChecking) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1944,20 +2213,20 @@ func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *ValidatingWebhook) MarshalTo(dAtA []byte) (int, error) {
+func (m *TypeChecking) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *TypeChecking) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.MatchConditions) > 0 {
- for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- {
+ if len(m.ExpressionWarnings) > 0 {
+ for iNdEx := len(m.ExpressionWarnings) - 1; iNdEx >= 0; iNdEx-- {
{
- size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ size, err := m.ExpressionWarnings[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
@@ -1965,84 +2234,44 @@ func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
- dAtA[i] = 0x5a
- }
- }
- if m.ObjectSelector != nil {
- {
- size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
+ dAtA[i] = 0xa
}
- i--
- dAtA[i] = 0x52
}
- if m.MatchPolicy != nil {
- i -= len(*m.MatchPolicy)
- copy(dAtA[i:], *m.MatchPolicy)
- i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy)))
- i--
- dAtA[i] = 0x4a
+ return len(dAtA) - i, nil
+}
+
+func (m *ValidatingAdmissionPolicy) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- if len(m.AdmissionReviewVersions) > 0 {
- for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.AdmissionReviewVersions[iNdEx])
- copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx])
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx])))
- i--
- dAtA[i] = 0x42
- }
- }
- if m.TimeoutSeconds != nil {
- i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds))
- i--
- dAtA[i] = 0x38
- }
- if m.SideEffects != nil {
- i -= len(*m.SideEffects)
- copy(dAtA[i:], *m.SideEffects)
- i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects)))
- i--
- dAtA[i] = 0x32
- }
- if m.NamespaceSelector != nil {
- {
- size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x2a
- }
- if m.FailurePolicy != nil {
- i -= len(*m.FailurePolicy)
- copy(dAtA[i:], *m.FailurePolicy)
- i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy)))
- i--
- dAtA[i] = 0x22
- }
- if len(m.Rules) > 0 {
- for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
+ return dAtA[:n], nil
+}
+
+func (m *ValidatingAdmissionPolicy) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ValidatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
}
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
+ i--
+ dAtA[i] = 0x1a
{
- size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i])
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
@@ -2051,15 +2280,20 @@ func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) {
}
i--
dAtA[i] = 0x12
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
-func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) {
+func (m *ValidatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -2069,30 +2303,26 @@ func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) {
+func (m *ValidatingAdmissionPolicyBinding) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *ValidatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Webhooks) > 0 {
- for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
}
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
+ i--
+ dAtA[i] = 0x12
{
size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
@@ -2106,7 +2336,7 @@ func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int,
return len(dAtA) - i, nil
}
-func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) {
+func (m *ValidatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -2116,12 +2346,12 @@ func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error)
return dAtA[:n], nil
}
-func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) {
+func (m *ValidatingAdmissionPolicyBindingList) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *ValidatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
@@ -2153,7 +2383,7 @@ func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (
return len(dAtA) - i, nil
}
-func (m *Validation) Marshal() (dAtA []byte, err error) {
+func (m *ValidatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -2163,42 +2393,58 @@ func (m *Validation) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *Validation) MarshalTo(dAtA []byte) (int, error) {
+func (m *ValidatingAdmissionPolicyBindingSpec) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *Validation) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- i -= len(m.MessageExpression)
- copy(dAtA[i:], m.MessageExpression)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.MessageExpression)))
- i--
- dAtA[i] = 0x22
- if m.Reason != nil {
- i -= len(*m.Reason)
- copy(dAtA[i:], *m.Reason)
- i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason)))
+ if len(m.ValidationActions) > 0 {
+ for iNdEx := len(m.ValidationActions) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.ValidationActions[iNdEx])
+ copy(dAtA[i:], m.ValidationActions[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValidationActions[iNdEx])))
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if m.MatchResources != nil {
+ {
+ size, err := m.MatchResources.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
i--
dAtA[i] = 0x1a
}
- i -= len(m.Message)
- copy(dAtA[i:], m.Message)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
- i--
- dAtA[i] = 0x12
- i -= len(m.Expression)
- copy(dAtA[i:], m.Expression)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
+ if m.ParamRef != nil {
+ {
+ size, err := m.ParamRef.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.PolicyName)
+ copy(dAtA[i:], m.PolicyName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyName)))
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
-func (m *Variable) Marshal() (dAtA []byte, err error) {
+func (m *ValidatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -2208,30 +2454,44 @@ func (m *Variable) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *Variable) MarshalTo(dAtA []byte) (int, error) {
+func (m *ValidatingAdmissionPolicyList) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *Variable) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *ValidatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- i -= len(m.Expression)
- copy(dAtA[i:], m.Expression)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
- i--
- dAtA[i] = 0x12
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
-func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) {
+func (m *ValidatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -2241,335 +2501,636 @@ func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) {
+func (m *ValidatingAdmissionPolicySpec) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- if m.URL != nil {
- i -= len(*m.URL)
- copy(dAtA[i:], *m.URL)
- i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL)))
- i--
- dAtA[i] = 0x1a
- }
- if m.CABundle != nil {
- i -= len(m.CABundle)
- copy(dAtA[i:], m.CABundle)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle)))
- i--
- dAtA[i] = 0x12
- }
- if m.Service != nil {
- {
- size, err := m.Service.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
+ if len(m.Variables) > 0 {
+ for iNdEx := len(m.Variables) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Variables[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x3a
}
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
- offset -= sovGenerated(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
}
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *AuditAnnotation) Size() (n int) {
- if m == nil {
- return 0
+ if len(m.MatchConditions) > 0 {
+ for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
}
- var l int
- _ = l
- l = len(m.Key)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.ValueExpression)
- n += 1 + l + sovGenerated(uint64(l))
- return n
+ if len(m.AuditAnnotations) > 0 {
+ for iNdEx := len(m.AuditAnnotations) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.AuditAnnotations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ }
+ if m.FailurePolicy != nil {
+ i -= len(*m.FailurePolicy)
+ copy(dAtA[i:], *m.FailurePolicy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.Validations) > 0 {
+ for iNdEx := len(m.Validations) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Validations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if m.MatchConstraints != nil {
+ {
+ size, err := m.MatchConstraints.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.ParamKind != nil {
+ {
+ size, err := m.ParamKind.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
}
-func (m *ExpressionWarning) Size() (n int) {
- if m == nil {
- return 0
+func (m *ValidatingAdmissionPolicyStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- var l int
- _ = l
- l = len(m.FieldRef)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Warning)
- n += 1 + l + sovGenerated(uint64(l))
- return n
+ return dAtA[:n], nil
}
-func (m *MatchCondition) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Expression)
- n += 1 + l + sovGenerated(uint64(l))
- return n
+func (m *ValidatingAdmissionPolicyStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *MatchResources) Size() (n int) {
- if m == nil {
- return 0
- }
+func (m *ValidatingAdmissionPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- if m.NamespaceSelector != nil {
- l = m.NamespaceSelector.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- if m.ObjectSelector != nil {
- l = m.ObjectSelector.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- if len(m.ResourceRules) > 0 {
- for _, e := range m.ResourceRules {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
}
}
- if len(m.ExcludeResourceRules) > 0 {
- for _, e := range m.ExcludeResourceRules {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ if m.TypeChecking != nil {
+ {
+ size, err := m.TypeChecking.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
+ i--
+ dAtA[i] = 0x12
}
- if m.MatchPolicy != nil {
- l = len(*m.MatchPolicy)
- n += 1 + l + sovGenerated(uint64(l))
- }
- return n
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
}
-func (m *MutatingWebhook) Size() (n int) {
- if m == nil {
- return 0
+func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
+ return dAtA[:n], nil
+}
+
+func (m *ValidatingWebhook) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- l = len(m.Name)
- n += 1 + l + sovGenerated(uint64(l))
- l = m.ClientConfig.Size()
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.Rules) > 0 {
- for _, e := range m.Rules {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ if len(m.MatchConditions) > 0 {
+ for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x5a
}
}
- if m.FailurePolicy != nil {
- l = len(*m.FailurePolicy)
- n += 1 + l + sovGenerated(uint64(l))
- }
- if m.NamespaceSelector != nil {
- l = m.NamespaceSelector.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- if m.SideEffects != nil {
- l = len(*m.SideEffects)
- n += 1 + l + sovGenerated(uint64(l))
+ if m.ObjectSelector != nil {
+ {
+ size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x52
}
- if m.TimeoutSeconds != nil {
- n += 1 + sovGenerated(uint64(*m.TimeoutSeconds))
+ if m.MatchPolicy != nil {
+ i -= len(*m.MatchPolicy)
+ copy(dAtA[i:], *m.MatchPolicy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy)))
+ i--
+ dAtA[i] = 0x4a
}
if len(m.AdmissionReviewVersions) > 0 {
- for _, s := range m.AdmissionReviewVersions {
- l = len(s)
- n += 1 + l + sovGenerated(uint64(l))
+ for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.AdmissionReviewVersions[iNdEx])
+ copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx])))
+ i--
+ dAtA[i] = 0x42
}
}
- if m.MatchPolicy != nil {
- l = len(*m.MatchPolicy)
- n += 1 + l + sovGenerated(uint64(l))
- }
- if m.ReinvocationPolicy != nil {
- l = len(*m.ReinvocationPolicy)
- n += 1 + l + sovGenerated(uint64(l))
+ if m.TimeoutSeconds != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds))
+ i--
+ dAtA[i] = 0x38
}
- if m.ObjectSelector != nil {
- l = m.ObjectSelector.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ if m.SideEffects != nil {
+ i -= len(*m.SideEffects)
+ copy(dAtA[i:], *m.SideEffects)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects)))
+ i--
+ dAtA[i] = 0x32
}
- if len(m.MatchConditions) > 0 {
- for _, e := range m.MatchConditions {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ if m.NamespaceSelector != nil {
+ {
+ size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
+ i--
+ dAtA[i] = 0x2a
}
- return n
-}
-
-func (m *MutatingWebhookConfiguration) Size() (n int) {
- if m == nil {
- return 0
+ if m.FailurePolicy != nil {
+ i -= len(*m.FailurePolicy)
+ copy(dAtA[i:], *m.FailurePolicy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy)))
+ i--
+ dAtA[i] = 0x22
}
- var l int
- _ = l
- l = m.ObjectMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.Webhooks) > 0 {
- for _, e := range m.Webhooks {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Rules) > 0 {
+ for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
}
}
- return n
-}
-
-func (m *MutatingWebhookConfigurationList) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.ListMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.Items) > 0 {
- for _, e := range m.Items {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ {
+ size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
}
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- return n
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
-func (m *NamedRuleWithOperations) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.ResourceNames) > 0 {
- for _, s := range m.ResourceNames {
- l = len(s)
- n += 1 + l + sovGenerated(uint64(l))
- }
+func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- l = m.RuleWithOperations.Size()
- n += 1 + l + sovGenerated(uint64(l))
- return n
+ return dAtA[:n], nil
}
-func (m *ParamKind) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.APIVersion)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Kind)
- n += 1 + l + sovGenerated(uint64(l))
- return n
+func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ParamRef) Size() (n int) {
- if m == nil {
- return 0
- }
+func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- l = len(m.Name)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Namespace)
- n += 1 + l + sovGenerated(uint64(l))
- if m.Selector != nil {
- l = m.Selector.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Webhooks) > 0 {
+ for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
}
- if m.ParameterNotFoundAction != nil {
- l = len(*m.ParameterNotFoundAction)
- n += 1 + l + sovGenerated(uint64(l))
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- return n
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
-func (m *ServiceReference) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Namespace)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Name)
- n += 1 + l + sovGenerated(uint64(l))
- if m.Path != nil {
- l = len(*m.Path)
- n += 1 + l + sovGenerated(uint64(l))
- }
- if m.Port != nil {
- n += 1 + sovGenerated(uint64(*m.Port))
+func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- return n
+ return dAtA[:n], nil
}
-func (m *TypeChecking) Size() (n int) {
- if m == nil {
- return 0
- }
+func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- if len(m.ExpressionWarnings) > 0 {
- for _, e := range m.ExpressionWarnings {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
}
}
- return n
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
-func (m *ValidatingAdmissionPolicy) Size() (n int) {
- if m == nil {
- return 0
+func (m *Validation) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- var l int
- _ = l
- l = m.ObjectMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- l = m.Spec.Size()
- n += 1 + l + sovGenerated(uint64(l))
- l = m.Status.Size()
- n += 1 + l + sovGenerated(uint64(l))
- return n
+ return dAtA[:n], nil
}
-func (m *ValidatingAdmissionPolicyBinding) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.ObjectMeta.Size()
+func (m *Validation) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Validation) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.MessageExpression)
+ copy(dAtA[i:], m.MessageExpression)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.MessageExpression)))
+ i--
+ dAtA[i] = 0x22
+ if m.Reason != nil {
+ i -= len(*m.Reason)
+ copy(dAtA[i:], *m.Reason)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Expression)
+ copy(dAtA[i:], m.Expression)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Variable) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Variable) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Variable) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Expression)
+ copy(dAtA[i:], m.Expression)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.URL != nil {
+ i -= len(*m.URL)
+ copy(dAtA[i:], *m.URL)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.CABundle != nil {
+ i -= len(m.CABundle)
+ copy(dAtA[i:], m.CABundle)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Service != nil {
+ {
+ size, err := m.Service.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *ApplyConfiguration) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Expression)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *AuditAnnotation) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Key)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.ValueExpression)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ExpressionWarning) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.FieldRef)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Warning)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *JSONPatch) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Expression)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *MatchCondition) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Expression)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *MatchResources) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.NamespaceSelector != nil {
+ l = m.NamespaceSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ObjectSelector != nil {
+ l = m.ObjectSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.ResourceRules) > 0 {
+ for _, e := range m.ResourceRules {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.ExcludeResourceRules) > 0 {
+ for _, e := range m.ExcludeResourceRules {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.MatchPolicy != nil {
+ l = len(*m.MatchPolicy)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *MutatingAdmissionPolicy) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
n += 1 + l + sovGenerated(uint64(l))
l = m.Spec.Size()
n += 1 + l + sovGenerated(uint64(l))
return n
}
-func (m *ValidatingAdmissionPolicyBindingList) Size() (n int) {
+func (m *MutatingAdmissionPolicyBinding) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *MutatingAdmissionPolicyBindingList) Size() (n int) {
if m == nil {
return 0
}
@@ -2586,7 +3147,7 @@ func (m *ValidatingAdmissionPolicyBindingList) Size() (n int) {
return n
}
-func (m *ValidatingAdmissionPolicyBindingSpec) Size() (n int) {
+func (m *MutatingAdmissionPolicyBindingSpec) Size() (n int) {
if m == nil {
return 0
}
@@ -2602,16 +3163,10 @@ func (m *ValidatingAdmissionPolicyBindingSpec) Size() (n int) {
l = m.MatchResources.Size()
n += 1 + l + sovGenerated(uint64(l))
}
- if len(m.ValidationActions) > 0 {
- for _, s := range m.ValidationActions {
- l = len(s)
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
return n
}
-func (m *ValidatingAdmissionPolicyList) Size() (n int) {
+func (m *MutatingAdmissionPolicyList) Size() (n int) {
if m == nil {
return 0
}
@@ -2628,7 +3183,7 @@ func (m *ValidatingAdmissionPolicyList) Size() (n int) {
return n
}
-func (m *ValidatingAdmissionPolicySpec) Size() (n int) {
+func (m *MutatingAdmissionPolicySpec) Size() (n int) {
if m == nil {
return 0
}
@@ -2642,8 +3197,14 @@ func (m *ValidatingAdmissionPolicySpec) Size() (n int) {
l = m.MatchConstraints.Size()
n += 1 + l + sovGenerated(uint64(l))
}
- if len(m.Validations) > 0 {
- for _, e := range m.Validations {
+ if len(m.Variables) > 0 {
+ for _, e := range m.Variables {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Mutations) > 0 {
+ for _, e := range m.Mutations {
l = e.Size()
n += 1 + l + sovGenerated(uint64(l))
}
@@ -2652,48 +3213,18 @@ func (m *ValidatingAdmissionPolicySpec) Size() (n int) {
l = len(*m.FailurePolicy)
n += 1 + l + sovGenerated(uint64(l))
}
- if len(m.AuditAnnotations) > 0 {
- for _, e := range m.AuditAnnotations {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
if len(m.MatchConditions) > 0 {
for _, e := range m.MatchConditions {
l = e.Size()
n += 1 + l + sovGenerated(uint64(l))
}
}
- if len(m.Variables) > 0 {
- for _, e := range m.Variables {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- return n
-}
-
-func (m *ValidatingAdmissionPolicyStatus) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 1 + sovGenerated(uint64(m.ObservedGeneration))
- if m.TypeChecking != nil {
- l = m.TypeChecking.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- if len(m.Conditions) > 0 {
- for _, e := range m.Conditions {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
+ l = len(m.ReinvocationPolicy)
+ n += 1 + l + sovGenerated(uint64(l))
return n
}
-func (m *ValidatingWebhook) Size() (n int) {
+func (m *MutatingWebhook) Size() (n int) {
if m == nil {
return 0
}
@@ -2734,6 +3265,10 @@ func (m *ValidatingWebhook) Size() (n int) {
l = len(*m.MatchPolicy)
n += 1 + l + sovGenerated(uint64(l))
}
+ if m.ReinvocationPolicy != nil {
+ l = len(*m.ReinvocationPolicy)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
if m.ObjectSelector != nil {
l = m.ObjectSelector.Size()
n += 1 + l + sovGenerated(uint64(l))
@@ -2747,7 +3282,7 @@ func (m *ValidatingWebhook) Size() (n int) {
return n
}
-func (m *ValidatingWebhookConfiguration) Size() (n int) {
+func (m *MutatingWebhookConfiguration) Size() (n int) {
if m == nil {
return 0
}
@@ -2764,7 +3299,7 @@ func (m *ValidatingWebhookConfiguration) Size() (n int) {
return n
}
-func (m *ValidatingWebhookConfigurationList) Size() (n int) {
+func (m *MutatingWebhookConfigurationList) Size() (n int) {
if m == nil {
return 0
}
@@ -2781,476 +3316,1911 @@ func (m *ValidatingWebhookConfigurationList) Size() (n int) {
return n
}
-func (m *Validation) Size() (n int) {
+func (m *Mutation) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
- l = len(m.Expression)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Message)
+ l = len(m.PatchType)
n += 1 + l + sovGenerated(uint64(l))
- if m.Reason != nil {
- l = len(*m.Reason)
+ if m.ApplyConfiguration != nil {
+ l = m.ApplyConfiguration.Size()
n += 1 + l + sovGenerated(uint64(l))
}
- l = len(m.MessageExpression)
+ if m.JSONPatch != nil {
+ l = m.JSONPatch.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *NamedRuleWithOperations) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.ResourceNames) > 0 {
+ for _, s := range m.ResourceNames {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = m.RuleWithOperations.Size()
n += 1 + l + sovGenerated(uint64(l))
return n
}
-func (m *Variable) Size() (n int) {
+func (m *ParamKind) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
- l = len(m.Name)
+ l = len(m.APIVersion)
n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Expression)
+ l = len(m.Kind)
n += 1 + l + sovGenerated(uint64(l))
return n
}
-func (m *WebhookClientConfig) Size() (n int) {
+func (m *ParamRef) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
- if m.Service != nil {
- l = m.Service.Size()
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Namespace)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Selector != nil {
+ l = m.Selector.Size()
n += 1 + l + sovGenerated(uint64(l))
}
- if m.CABundle != nil {
- l = len(m.CABundle)
+ if m.ParameterNotFoundAction != nil {
+ l = len(*m.ParameterNotFoundAction)
n += 1 + l + sovGenerated(uint64(l))
}
- if m.URL != nil {
- l = len(*m.URL)
+ return n
+}
+
+func (m *ServiceReference) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Namespace)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Path != nil {
+ l = len(*m.Path)
n += 1 + l + sovGenerated(uint64(l))
}
+ if m.Port != nil {
+ n += 1 + sovGenerated(uint64(*m.Port))
+ }
return n
}
-func sovGenerated(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozGenerated(x uint64) (n int) {
- return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+func (m *TypeChecking) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.ExpressionWarnings) > 0 {
+ for _, e := range m.ExpressionWarnings {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
}
-func (this *AuditAnnotation) String() string {
- if this == nil {
- return "nil"
+
+func (m *ValidatingAdmissionPolicy) Size() (n int) {
+ if m == nil {
+ return 0
}
- s := strings.Join([]string{`&AuditAnnotation{`,
- `Key:` + fmt.Sprintf("%v", this.Key) + `,`,
- `ValueExpression:` + fmt.Sprintf("%v", this.ValueExpression) + `,`,
- `}`,
- }, "")
- return s
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
}
-func (this *ExpressionWarning) String() string {
- if this == nil {
- return "nil"
+
+func (m *ValidatingAdmissionPolicyBinding) Size() (n int) {
+ if m == nil {
+ return 0
}
- s := strings.Join([]string{`&ExpressionWarning{`,
- `FieldRef:` + fmt.Sprintf("%v", this.FieldRef) + `,`,
- `Warning:` + fmt.Sprintf("%v", this.Warning) + `,`,
- `}`,
- }, "")
- return s
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
}
-func (this *MatchCondition) String() string {
- if this == nil {
- return "nil"
+
+func (m *ValidatingAdmissionPolicyBindingList) Size() (n int) {
+ if m == nil {
+ return 0
}
- s := strings.Join([]string{`&MatchCondition{`,
- `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
- `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
- `}`,
- }, "")
- return s
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
}
-func (this *MatchResources) String() string {
- if this == nil {
- return "nil"
+
+func (m *ValidatingAdmissionPolicyBindingSpec) Size() (n int) {
+ if m == nil {
+ return 0
}
- repeatedStringForResourceRules := "[]NamedRuleWithOperations{"
- for _, f := range this.ResourceRules {
- repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + ","
+ var l int
+ _ = l
+ l = len(m.PolicyName)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ParamRef != nil {
+ l = m.ParamRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
}
- repeatedStringForResourceRules += "}"
- repeatedStringForExcludeResourceRules := "[]NamedRuleWithOperations{"
- for _, f := range this.ExcludeResourceRules {
- repeatedStringForExcludeResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + ","
+ if m.MatchResources != nil {
+ l = m.MatchResources.Size()
+ n += 1 + l + sovGenerated(uint64(l))
}
- repeatedStringForExcludeResourceRules += "}"
- s := strings.Join([]string{`&MatchResources{`,
- `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
- `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
- `ResourceRules:` + repeatedStringForResourceRules + `,`,
- `ExcludeResourceRules:` + repeatedStringForExcludeResourceRules + `,`,
- `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`,
- `}`,
- }, "")
- return s
+ if len(m.ValidationActions) > 0 {
+ for _, s := range m.ValidationActions {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
}
-func (this *MutatingWebhook) String() string {
- if this == nil {
- return "nil"
+
+func (m *ValidatingAdmissionPolicyList) Size() (n int) {
+ if m == nil {
+ return 0
}
- repeatedStringForRules := "[]RuleWithOperations{"
- for _, f := range this.Rules {
- repeatedStringForRules += fmt.Sprintf("%v", f) + ","
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
}
- repeatedStringForRules += "}"
- repeatedStringForMatchConditions := "[]MatchCondition{"
- for _, f := range this.MatchConditions {
- repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + ","
- }
- repeatedStringForMatchConditions += "}"
- s := strings.Join([]string{`&MutatingWebhook{`,
- `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
- `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`,
- `Rules:` + repeatedStringForRules + `,`,
- `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`,
- `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
- `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`,
- `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`,
- `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`,
- `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`,
- `ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`,
- `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
- `MatchConditions:` + repeatedStringForMatchConditions + `,`,
- `}`,
- }, "")
- return s
+ return n
}
-func (this *MutatingWebhookConfiguration) String() string {
- if this == nil {
- return "nil"
+
+func (m *ValidatingAdmissionPolicySpec) Size() (n int) {
+ if m == nil {
+ return 0
}
- repeatedStringForWebhooks := "[]MutatingWebhook{"
- for _, f := range this.Webhooks {
- repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "MutatingWebhook", "MutatingWebhook", 1), `&`, ``, 1) + ","
+ var l int
+ _ = l
+ if m.ParamKind != nil {
+ l = m.ParamKind.Size()
+ n += 1 + l + sovGenerated(uint64(l))
}
- repeatedStringForWebhooks += "}"
- s := strings.Join([]string{`&MutatingWebhookConfiguration{`,
- `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
- `Webhooks:` + repeatedStringForWebhooks + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *MutatingWebhookConfigurationList) String() string {
- if this == nil {
- return "nil"
+ if m.MatchConstraints != nil {
+ l = m.MatchConstraints.Size()
+ n += 1 + l + sovGenerated(uint64(l))
}
- repeatedStringForItems := "[]MutatingWebhookConfiguration{"
- for _, f := range this.Items {
- repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + ","
+ if len(m.Validations) > 0 {
+ for _, e := range m.Validations {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
}
- repeatedStringForItems += "}"
- s := strings.Join([]string{`&MutatingWebhookConfigurationList{`,
- `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
- `Items:` + repeatedStringForItems + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *NamedRuleWithOperations) String() string {
- if this == nil {
- return "nil"
+ if m.FailurePolicy != nil {
+ l = len(*m.FailurePolicy)
+ n += 1 + l + sovGenerated(uint64(l))
}
- s := strings.Join([]string{`&NamedRuleWithOperations{`,
- `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`,
- `RuleWithOperations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RuleWithOperations), "RuleWithOperations", "v11.RuleWithOperations", 1), `&`, ``, 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *ParamKind) String() string {
- if this == nil {
- return "nil"
+ if len(m.AuditAnnotations) > 0 {
+ for _, e := range m.AuditAnnotations {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
}
- s := strings.Join([]string{`&ParamKind{`,
- `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`,
- `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *ParamRef) String() string {
- if this == nil {
- return "nil"
+ if len(m.MatchConditions) > 0 {
+ for _, e := range m.MatchConditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
}
- s := strings.Join([]string{`&ParamRef{`,
- `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
- `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
- `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
- `ParameterNotFoundAction:` + valueToStringGenerated(this.ParameterNotFoundAction) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *ServiceReference) String() string {
- if this == nil {
- return "nil"
+ if len(m.Variables) > 0 {
+ for _, e := range m.Variables {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
}
- s := strings.Join([]string{`&ServiceReference{`,
- `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
- `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
- `Path:` + valueToStringGenerated(this.Path) + `,`,
- `Port:` + valueToStringGenerated(this.Port) + `,`,
- `}`,
- }, "")
- return s
+ return n
}
-func (this *TypeChecking) String() string {
- if this == nil {
- return "nil"
+
+func (m *ValidatingAdmissionPolicyStatus) Size() (n int) {
+ if m == nil {
+ return 0
}
- repeatedStringForExpressionWarnings := "[]ExpressionWarning{"
- for _, f := range this.ExpressionWarnings {
- repeatedStringForExpressionWarnings += strings.Replace(strings.Replace(f.String(), "ExpressionWarning", "ExpressionWarning", 1), `&`, ``, 1) + ","
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+ if m.TypeChecking != nil {
+ l = m.TypeChecking.Size()
+ n += 1 + l + sovGenerated(uint64(l))
}
- repeatedStringForExpressionWarnings += "}"
- s := strings.Join([]string{`&TypeChecking{`,
- `ExpressionWarnings:` + repeatedStringForExpressionWarnings + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *ValidatingAdmissionPolicy) String() string {
- if this == nil {
- return "nil"
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
}
- s := strings.Join([]string{`&ValidatingAdmissionPolicy{`,
- `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
- `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicySpec", "ValidatingAdmissionPolicySpec", 1), `&`, ``, 1) + `,`,
- `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ValidatingAdmissionPolicyStatus", "ValidatingAdmissionPolicyStatus", 1), `&`, ``, 1) + `,`,
- `}`,
- }, "")
- return s
+ return n
}
-func (this *ValidatingAdmissionPolicyBinding) String() string {
- if this == nil {
- return "nil"
+
+func (m *ValidatingWebhook) Size() (n int) {
+ if m == nil {
+ return 0
}
- s := strings.Join([]string{`&ValidatingAdmissionPolicyBinding{`,
- `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
- `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicyBindingSpec", "ValidatingAdmissionPolicyBindingSpec", 1), `&`, ``, 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *ValidatingAdmissionPolicyBindingList) String() string {
- if this == nil {
- return "nil"
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.ClientConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Rules) > 0 {
+ for _, e := range m.Rules {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
}
- repeatedStringForItems := "[]ValidatingAdmissionPolicyBinding{"
- for _, f := range this.Items {
- repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicyBinding", "ValidatingAdmissionPolicyBinding", 1), `&`, ``, 1) + ","
+ if m.FailurePolicy != nil {
+ l = len(*m.FailurePolicy)
+ n += 1 + l + sovGenerated(uint64(l))
}
- repeatedStringForItems += "}"
- s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingList{`,
- `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
- `Items:` + repeatedStringForItems + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *ValidatingAdmissionPolicyBindingSpec) String() string {
- if this == nil {
- return "nil"
+ if m.NamespaceSelector != nil {
+ l = m.NamespaceSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
}
- s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingSpec{`,
- `PolicyName:` + fmt.Sprintf("%v", this.PolicyName) + `,`,
- `ParamRef:` + strings.Replace(this.ParamRef.String(), "ParamRef", "ParamRef", 1) + `,`,
- `MatchResources:` + strings.Replace(this.MatchResources.String(), "MatchResources", "MatchResources", 1) + `,`,
- `ValidationActions:` + fmt.Sprintf("%v", this.ValidationActions) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *ValidatingAdmissionPolicyList) String() string {
- if this == nil {
- return "nil"
+ if m.SideEffects != nil {
+ l = len(*m.SideEffects)
+ n += 1 + l + sovGenerated(uint64(l))
}
- repeatedStringForItems := "[]ValidatingAdmissionPolicy{"
- for _, f := range this.Items {
- repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicy", "ValidatingAdmissionPolicy", 1), `&`, ``, 1) + ","
+ if m.TimeoutSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.TimeoutSeconds))
}
- repeatedStringForItems += "}"
- s := strings.Join([]string{`&ValidatingAdmissionPolicyList{`,
- `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
- `Items:` + repeatedStringForItems + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *ValidatingAdmissionPolicySpec) String() string {
- if this == nil {
- return "nil"
+ if len(m.AdmissionReviewVersions) > 0 {
+ for _, s := range m.AdmissionReviewVersions {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
}
- repeatedStringForValidations := "[]Validation{"
- for _, f := range this.Validations {
- repeatedStringForValidations += strings.Replace(strings.Replace(f.String(), "Validation", "Validation", 1), `&`, ``, 1) + ","
+ if m.MatchPolicy != nil {
+ l = len(*m.MatchPolicy)
+ n += 1 + l + sovGenerated(uint64(l))
}
- repeatedStringForValidations += "}"
- repeatedStringForAuditAnnotations := "[]AuditAnnotation{"
- for _, f := range this.AuditAnnotations {
- repeatedStringForAuditAnnotations += strings.Replace(strings.Replace(f.String(), "AuditAnnotation", "AuditAnnotation", 1), `&`, ``, 1) + ","
+ if m.ObjectSelector != nil {
+ l = m.ObjectSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
}
- repeatedStringForAuditAnnotations += "}"
- repeatedStringForMatchConditions := "[]MatchCondition{"
- for _, f := range this.MatchConditions {
- repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + ","
+ if len(m.MatchConditions) > 0 {
+ for _, e := range m.MatchConditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
}
- repeatedStringForMatchConditions += "}"
- repeatedStringForVariables := "[]Variable{"
- for _, f := range this.Variables {
- repeatedStringForVariables += strings.Replace(strings.Replace(f.String(), "Variable", "Variable", 1), `&`, ``, 1) + ","
+ return n
+}
+
+func (m *ValidatingWebhookConfiguration) Size() (n int) {
+ if m == nil {
+ return 0
}
- repeatedStringForVariables += "}"
- s := strings.Join([]string{`&ValidatingAdmissionPolicySpec{`,
- `ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`,
- `MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`,
- `Validations:` + repeatedStringForValidations + `,`,
- `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`,
- `AuditAnnotations:` + repeatedStringForAuditAnnotations + `,`,
- `MatchConditions:` + repeatedStringForMatchConditions + `,`,
- `Variables:` + repeatedStringForVariables + `,`,
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Webhooks) > 0 {
+ for _, e := range m.Webhooks {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ValidatingWebhookConfigurationList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *Validation) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Expression)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Reason != nil {
+ l = len(*m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.MessageExpression)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Variable) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Expression)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *WebhookClientConfig) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Service != nil {
+ l = m.Service.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.CABundle != nil {
+ l = len(m.CABundle)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.URL != nil {
+ l = len(*m.URL)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *ApplyConfiguration) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ApplyConfiguration{`,
+ `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
`}`,
}, "")
return s
}
-func (this *ValidatingAdmissionPolicyStatus) String() string {
+func (this *AuditAnnotation) String() string {
if this == nil {
return "nil"
}
- repeatedStringForConditions := "[]Condition{"
- for _, f := range this.Conditions {
- repeatedStringForConditions += fmt.Sprintf("%v", f) + ","
- }
- repeatedStringForConditions += "}"
- s := strings.Join([]string{`&ValidatingAdmissionPolicyStatus{`,
- `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
- `TypeChecking:` + strings.Replace(this.TypeChecking.String(), "TypeChecking", "TypeChecking", 1) + `,`,
- `Conditions:` + repeatedStringForConditions + `,`,
+ s := strings.Join([]string{`&AuditAnnotation{`,
+ `Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+ `ValueExpression:` + fmt.Sprintf("%v", this.ValueExpression) + `,`,
`}`,
}, "")
return s
}
-func (this *ValidatingWebhook) String() string {
+func (this *ExpressionWarning) String() string {
if this == nil {
return "nil"
}
- repeatedStringForRules := "[]RuleWithOperations{"
- for _, f := range this.Rules {
- repeatedStringForRules += fmt.Sprintf("%v", f) + ","
+ s := strings.Join([]string{`&ExpressionWarning{`,
+ `FieldRef:` + fmt.Sprintf("%v", this.FieldRef) + `,`,
+ `Warning:` + fmt.Sprintf("%v", this.Warning) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *JSONPatch) String() string {
+ if this == nil {
+ return "nil"
}
- repeatedStringForRules += "}"
- repeatedStringForMatchConditions := "[]MatchCondition{"
- for _, f := range this.MatchConditions {
- repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + ","
+ s := strings.Join([]string{`&JSONPatch{`,
+ `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MatchCondition) String() string {
+ if this == nil {
+ return "nil"
}
- repeatedStringForMatchConditions += "}"
- s := strings.Join([]string{`&ValidatingWebhook{`,
+ s := strings.Join([]string{`&MatchCondition{`,
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
- `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`,
- `Rules:` + repeatedStringForRules + `,`,
- `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`,
+ `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MatchResources) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForResourceRules := "[]NamedRuleWithOperations{"
+ for _, f := range this.ResourceRules {
+ repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForResourceRules += "}"
+ repeatedStringForExcludeResourceRules := "[]NamedRuleWithOperations{"
+ for _, f := range this.ExcludeResourceRules {
+ repeatedStringForExcludeResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForExcludeResourceRules += "}"
+ s := strings.Join([]string{`&MatchResources{`,
`NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
- `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`,
- `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`,
- `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`,
- `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`,
`ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
- `MatchConditions:` + repeatedStringForMatchConditions + `,`,
+ `ResourceRules:` + repeatedStringForResourceRules + `,`,
+ `ExcludeResourceRules:` + repeatedStringForExcludeResourceRules + `,`,
+ `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`,
`}`,
}, "")
return s
}
-func (this *ValidatingWebhookConfiguration) String() string {
+func (this *MutatingAdmissionPolicy) String() string {
if this == nil {
return "nil"
}
- repeatedStringForWebhooks := "[]ValidatingWebhook{"
- for _, f := range this.Webhooks {
- repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "ValidatingWebhook", "ValidatingWebhook", 1), `&`, ``, 1) + ","
+ s := strings.Join([]string{`&MutatingAdmissionPolicy{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "MutatingAdmissionPolicySpec", "MutatingAdmissionPolicySpec", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MutatingAdmissionPolicyBinding) String() string {
+ if this == nil {
+ return "nil"
}
- repeatedStringForWebhooks += "}"
- s := strings.Join([]string{`&ValidatingWebhookConfiguration{`,
+ s := strings.Join([]string{`&MutatingAdmissionPolicyBinding{`,
`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
- `Webhooks:` + repeatedStringForWebhooks + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "MutatingAdmissionPolicyBindingSpec", "MutatingAdmissionPolicyBindingSpec", 1), `&`, ``, 1) + `,`,
`}`,
}, "")
return s
}
-func (this *ValidatingWebhookConfigurationList) String() string {
+func (this *MutatingAdmissionPolicyBindingList) String() string {
if this == nil {
return "nil"
}
- repeatedStringForItems := "[]ValidatingWebhookConfiguration{"
+ repeatedStringForItems := "[]MutatingAdmissionPolicyBinding{"
for _, f := range this.Items {
- repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + ","
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingAdmissionPolicyBinding", "MutatingAdmissionPolicyBinding", 1), `&`, ``, 1) + ","
}
repeatedStringForItems += "}"
- s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`,
+ s := strings.Join([]string{`&MutatingAdmissionPolicyBindingList{`,
`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
`Items:` + repeatedStringForItems + `,`,
`}`,
}, "")
return s
}
-func (this *Validation) String() string {
+func (this *MutatingAdmissionPolicyBindingSpec) String() string {
if this == nil {
return "nil"
}
- s := strings.Join([]string{`&Validation{`,
- `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
- `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
- `Reason:` + valueToStringGenerated(this.Reason) + `,`,
- `MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`,
+ s := strings.Join([]string{`&MutatingAdmissionPolicyBindingSpec{`,
+ `PolicyName:` + fmt.Sprintf("%v", this.PolicyName) + `,`,
+ `ParamRef:` + strings.Replace(this.ParamRef.String(), "ParamRef", "ParamRef", 1) + `,`,
+ `MatchResources:` + strings.Replace(this.MatchResources.String(), "MatchResources", "MatchResources", 1) + `,`,
`}`,
}, "")
return s
}
-func (this *Variable) String() string {
+func (this *MutatingAdmissionPolicyList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]MutatingAdmissionPolicy{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingAdmissionPolicy", "MutatingAdmissionPolicy", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&MutatingAdmissionPolicyList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MutatingAdmissionPolicySpec) String() string {
if this == nil {
return "nil"
}
- s := strings.Join([]string{`&Variable{`,
- `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
- `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *WebhookClientConfig) String() string {
- if this == nil {
- return "nil"
+ repeatedStringForVariables := "[]Variable{"
+ for _, f := range this.Variables {
+ repeatedStringForVariables += strings.Replace(strings.Replace(f.String(), "Variable", "Variable", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForVariables += "}"
+ repeatedStringForMutations := "[]Mutation{"
+ for _, f := range this.Mutations {
+ repeatedStringForMutations += strings.Replace(strings.Replace(f.String(), "Mutation", "Mutation", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForMutations += "}"
+ repeatedStringForMatchConditions := "[]MatchCondition{"
+ for _, f := range this.MatchConditions {
+ repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForMatchConditions += "}"
+ s := strings.Join([]string{`&MutatingAdmissionPolicySpec{`,
+ `ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`,
+ `MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`,
+ `Variables:` + repeatedStringForVariables + `,`,
+ `Mutations:` + repeatedStringForMutations + `,`,
+ `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`,
+ `MatchConditions:` + repeatedStringForMatchConditions + `,`,
+ `ReinvocationPolicy:` + fmt.Sprintf("%v", this.ReinvocationPolicy) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MutatingWebhook) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForRules := "[]RuleWithOperations{"
+ for _, f := range this.Rules {
+ repeatedStringForRules += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForRules += "}"
+ repeatedStringForMatchConditions := "[]MatchCondition{"
+ for _, f := range this.MatchConditions {
+ repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForMatchConditions += "}"
+ s := strings.Join([]string{`&MutatingWebhook{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`,
+ `Rules:` + repeatedStringForRules + `,`,
+ `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`,
+ `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
+ `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`,
+ `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`,
+ `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`,
+ `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`,
+ `ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`,
+ `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
+ `MatchConditions:` + repeatedStringForMatchConditions + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MutatingWebhookConfiguration) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForWebhooks := "[]MutatingWebhook{"
+ for _, f := range this.Webhooks {
+ repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "MutatingWebhook", "MutatingWebhook", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForWebhooks += "}"
+ s := strings.Join([]string{`&MutatingWebhookConfiguration{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Webhooks:` + repeatedStringForWebhooks + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MutatingWebhookConfigurationList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]MutatingWebhookConfiguration{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&MutatingWebhookConfigurationList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Mutation) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Mutation{`,
+ `PatchType:` + fmt.Sprintf("%v", this.PatchType) + `,`,
+ `ApplyConfiguration:` + strings.Replace(this.ApplyConfiguration.String(), "ApplyConfiguration", "ApplyConfiguration", 1) + `,`,
+ `JSONPatch:` + strings.Replace(this.JSONPatch.String(), "JSONPatch", "JSONPatch", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *NamedRuleWithOperations) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&NamedRuleWithOperations{`,
+ `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`,
+ `RuleWithOperations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RuleWithOperations), "RuleWithOperations", "v11.RuleWithOperations", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ParamKind) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ParamKind{`,
+ `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`,
+ `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ParamRef) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ParamRef{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+ `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
+ `ParameterNotFoundAction:` + valueToStringGenerated(this.ParameterNotFoundAction) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ServiceReference) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ServiceReference{`,
+ `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Path:` + valueToStringGenerated(this.Path) + `,`,
+ `Port:` + valueToStringGenerated(this.Port) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TypeChecking) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForExpressionWarnings := "[]ExpressionWarning{"
+ for _, f := range this.ExpressionWarnings {
+ repeatedStringForExpressionWarnings += strings.Replace(strings.Replace(f.String(), "ExpressionWarning", "ExpressionWarning", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForExpressionWarnings += "}"
+ s := strings.Join([]string{`&TypeChecking{`,
+ `ExpressionWarnings:` + repeatedStringForExpressionWarnings + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ValidatingAdmissionPolicy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ValidatingAdmissionPolicy{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicySpec", "ValidatingAdmissionPolicySpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ValidatingAdmissionPolicyStatus", "ValidatingAdmissionPolicyStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ValidatingAdmissionPolicyBinding) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ValidatingAdmissionPolicyBinding{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicyBindingSpec", "ValidatingAdmissionPolicyBindingSpec", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ValidatingAdmissionPolicyBindingList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]ValidatingAdmissionPolicyBinding{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicyBinding", "ValidatingAdmissionPolicyBinding", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ValidatingAdmissionPolicyBindingSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingSpec{`,
+ `PolicyName:` + fmt.Sprintf("%v", this.PolicyName) + `,`,
+ `ParamRef:` + strings.Replace(this.ParamRef.String(), "ParamRef", "ParamRef", 1) + `,`,
+ `MatchResources:` + strings.Replace(this.MatchResources.String(), "MatchResources", "MatchResources", 1) + `,`,
+ `ValidationActions:` + fmt.Sprintf("%v", this.ValidationActions) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ValidatingAdmissionPolicyList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]ValidatingAdmissionPolicy{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicy", "ValidatingAdmissionPolicy", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ValidatingAdmissionPolicyList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ValidatingAdmissionPolicySpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForValidations := "[]Validation{"
+ for _, f := range this.Validations {
+ repeatedStringForValidations += strings.Replace(strings.Replace(f.String(), "Validation", "Validation", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForValidations += "}"
+ repeatedStringForAuditAnnotations := "[]AuditAnnotation{"
+ for _, f := range this.AuditAnnotations {
+ repeatedStringForAuditAnnotations += strings.Replace(strings.Replace(f.String(), "AuditAnnotation", "AuditAnnotation", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForAuditAnnotations += "}"
+ repeatedStringForMatchConditions := "[]MatchCondition{"
+ for _, f := range this.MatchConditions {
+ repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForMatchConditions += "}"
+ repeatedStringForVariables := "[]Variable{"
+ for _, f := range this.Variables {
+ repeatedStringForVariables += strings.Replace(strings.Replace(f.String(), "Variable", "Variable", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForVariables += "}"
+ s := strings.Join([]string{`&ValidatingAdmissionPolicySpec{`,
+ `ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`,
+ `MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`,
+ `Validations:` + repeatedStringForValidations + `,`,
+ `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`,
+ `AuditAnnotations:` + repeatedStringForAuditAnnotations + `,`,
+ `MatchConditions:` + repeatedStringForMatchConditions + `,`,
+ `Variables:` + repeatedStringForVariables + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ValidatingAdmissionPolicyStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForConditions := "[]Condition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForConditions += "}"
+ s := strings.Join([]string{`&ValidatingAdmissionPolicyStatus{`,
+ `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
+ `TypeChecking:` + strings.Replace(this.TypeChecking.String(), "TypeChecking", "TypeChecking", 1) + `,`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ValidatingWebhook) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForRules := "[]RuleWithOperations{"
+ for _, f := range this.Rules {
+ repeatedStringForRules += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForRules += "}"
+ repeatedStringForMatchConditions := "[]MatchCondition{"
+ for _, f := range this.MatchConditions {
+ repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForMatchConditions += "}"
+ s := strings.Join([]string{`&ValidatingWebhook{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`,
+ `Rules:` + repeatedStringForRules + `,`,
+ `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`,
+ `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
+ `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`,
+ `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`,
+ `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`,
+ `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`,
+ `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
+ `MatchConditions:` + repeatedStringForMatchConditions + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ValidatingWebhookConfiguration) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForWebhooks := "[]ValidatingWebhook{"
+ for _, f := range this.Webhooks {
+ repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "ValidatingWebhook", "ValidatingWebhook", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForWebhooks += "}"
+ s := strings.Join([]string{`&ValidatingWebhookConfiguration{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Webhooks:` + repeatedStringForWebhooks + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ValidatingWebhookConfigurationList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]ValidatingWebhookConfiguration{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Validation) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Validation{`,
+ `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `Reason:` + valueToStringGenerated(this.Reason) + `,`,
+ `MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Variable) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Variable{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *WebhookClientConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&WebhookClientConfig{`,
+ `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`,
+ `CABundle:` + valueToStringGenerated(this.CABundle) + `,`,
+ `URL:` + valueToStringGenerated(this.URL) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringGenerated(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *ApplyConfiguration) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ApplyConfiguration: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ApplyConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Expression = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuditAnnotation) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuditAnnotation: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuditAnnotation: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ValueExpression", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ValueExpression = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ExpressionWarning) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ExpressionWarning: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ExpressionWarning: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.FieldRef = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Warning", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Warning = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *JSONPatch) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: JSONPatch: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: JSONPatch: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Expression = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MatchCondition) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MatchCondition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MatchCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Expression = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MatchResources) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MatchResources: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MatchResources: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NamespaceSelector == nil {
+ m.NamespaceSelector = &v1.LabelSelector{}
+ }
+ if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ObjectSelector == nil {
+ m.ObjectSelector = &v1.LabelSelector{}
+ }
+ if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ResourceRules = append(m.ResourceRules, NamedRuleWithOperations{})
+ if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExcludeResourceRules", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ExcludeResourceRules = append(m.ExcludeResourceRules, NamedRuleWithOperations{})
+ if err := m.ExcludeResourceRules[len(m.ExcludeResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := MatchPolicyType(dAtA[iNdEx:postIndex])
+ m.MatchPolicy = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MutatingAdmissionPolicy) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MutatingAdmissionPolicy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MutatingAdmissionPolicy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
}
- s := strings.Join([]string{`&WebhookClientConfig{`,
- `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`,
- `CABundle:` + valueToStringGenerated(this.CABundle) + `,`,
- `URL:` + valueToStringGenerated(this.URL) + `,`,
- `}`,
- }, "")
- return s
+ return nil
}
-func valueToStringGenerated(v interface{}) string {
- rv := reflect.ValueOf(v)
- if rv.IsNil() {
- return "nil"
+func (m *MutatingAdmissionPolicyBinding) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MutatingAdmissionPolicyBinding: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MutatingAdmissionPolicyBinding: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
}
- pv := reflect.Indirect(rv).Interface()
- return fmt.Sprintf("*%v", pv)
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
}
-func (m *AuditAnnotation) Unmarshal(dAtA []byte) error {
+func (m *MutatingAdmissionPolicyBindingList) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -3273,17 +5243,17 @@ func (m *AuditAnnotation) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: AuditAnnotation: wiretype end group for non-group")
+ return fmt.Errorf("proto: MutatingAdmissionPolicyBindingList: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: AuditAnnotation: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: MutatingAdmissionPolicyBindingList: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -3293,29 +5263,30 @@ func (m *AuditAnnotation) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Key = string(dAtA[iNdEx:postIndex])
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ValueExpression", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -3325,23 +5296,25 @@ func (m *AuditAnnotation) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.ValueExpression = string(dAtA[iNdEx:postIndex])
+ m.Items = append(m.Items, MutatingAdmissionPolicyBinding{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -3364,7 +5337,7 @@ func (m *AuditAnnotation) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *ExpressionWarning) Unmarshal(dAtA []byte) error {
+func (m *MutatingAdmissionPolicyBindingSpec) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -3387,15 +5360,15 @@ func (m *ExpressionWarning) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: ExpressionWarning: wiretype end group for non-group")
+ return fmt.Errorf("proto: MutatingAdmissionPolicyBindingSpec: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: ExpressionWarning: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: MutatingAdmissionPolicyBindingSpec: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
- case 2:
+ case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field PolicyName", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -3423,13 +5396,49 @@ func (m *ExpressionWarning) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.FieldRef = string(dAtA[iNdEx:postIndex])
+ m.PolicyName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ParamRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ParamRef == nil {
+ m.ParamRef = &ParamRef{}
+ }
+ if err := m.ParamRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Warning", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field MatchResources", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -3439,23 +5448,27 @@ func (m *ExpressionWarning) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Warning = string(dAtA[iNdEx:postIndex])
+ if m.MatchResources == nil {
+ m.MatchResources = &MatchResources{}
+ }
+ if err := m.MatchResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -3478,7 +5491,7 @@ func (m *ExpressionWarning) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *MatchCondition) Unmarshal(dAtA []byte) error {
+func (m *MutatingAdmissionPolicyList) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -3501,17 +5514,17 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: MatchCondition: wiretype end group for non-group")
+ return fmt.Errorf("proto: MutatingAdmissionPolicyList: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: MatchCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: MutatingAdmissionPolicyList: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -3521,29 +5534,30 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Name = string(dAtA[iNdEx:postIndex])
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -3553,23 +5567,25 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Expression = string(dAtA[iNdEx:postIndex])
+ m.Items = append(m.Items, MutatingAdmissionPolicy{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -3592,7 +5608,7 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *MatchResources) Unmarshal(dAtA []byte) error {
+func (m *MutatingAdmissionPolicySpec) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -3615,15 +5631,15 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: MatchResources: wiretype end group for non-group")
+ return fmt.Errorf("proto: MutatingAdmissionPolicySpec: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: MatchResources: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: MutatingAdmissionPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ParamKind", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -3650,16 +5666,16 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.NamespaceSelector == nil {
- m.NamespaceSelector = &v1.LabelSelector{}
+ if m.ParamKind == nil {
+ m.ParamKind = &ParamKind{}
}
- if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.ParamKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field MatchConstraints", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -3686,16 +5702,16 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.ObjectSelector == nil {
- m.ObjectSelector = &v1.LabelSelector{}
+ if m.MatchConstraints == nil {
+ m.MatchConstraints = &MatchResources{}
}
- if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.MatchConstraints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Variables", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -3722,14 +5738,14 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.ResourceRules = append(m.ResourceRules, NamedRuleWithOperations{})
- if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ m.Variables = append(m.Variables, Variable{})
+ if err := m.Variables[len(m.Variables)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 4:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ExcludeResourceRules", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Mutations", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -3756,14 +5772,81 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.ExcludeResourceRules = append(m.ExcludeResourceRules, NamedRuleWithOperations{})
- if err := m.ExcludeResourceRules[len(m.ExcludeResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ m.Mutations = append(m.Mutations, Mutation{})
+ if err := m.Mutations[len(m.Mutations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := FailurePolicyType(dAtA[iNdEx:postIndex])
+ m.FailurePolicy = &s
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.MatchConditions = append(m.MatchConditions, MatchCondition{})
+ if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 7:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ReinvocationPolicy", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -3791,8 +5874,7 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- s := MatchPolicyType(dAtA[iNdEx:postIndex])
- m.MatchPolicy = &s
+ m.ReinvocationPolicy = k8s_io_api_admissionregistration_v1.ReinvocationPolicyType(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -4160,7 +6242,7 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- s := ReinvocationPolicyType(dAtA[iNdEx:postIndex])
+ s := k8s_io_api_admissionregistration_v1.ReinvocationPolicyType(dAtA[iNdEx:postIndex])
m.ReinvocationPolicy = &s
iNdEx = postIndex
case 11:
@@ -4488,6 +6570,160 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *Mutation) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Mutation: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Mutation: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PatchType", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PatchType = PatchType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ApplyConfiguration", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ApplyConfiguration == nil {
+ m.ApplyConfiguration = &ApplyConfiguration{}
+ }
+ if err := m.ApplyConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field JSONPatch", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.JSONPatch == nil {
+ m.JSONPatch = &JSONPatch{}
+ }
+ if err := m.JSONPatch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *NamedRuleWithOperations) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
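The generated Unmarshal methods above all repeat the same protobuf base-128 varint loop to decode field tags and length prefixes. As a reading aid only (not part of the vendored code), a minimal standalone sketch of that loop, using a hypothetical decodeVarint helper:

package main

import (
	"errors"
	"fmt"
)

// decodeVarint reads one protobuf base-128 varint from data, returning the
// decoded value and the number of bytes consumed. Each byte contributes its
// low 7 bits; a set high bit means another byte follows. This mirrors the
// shift loop inlined throughout the generated Unmarshal methods above.
func decodeVarint(data []byte) (uint64, int, error) {
	var v uint64
	for i, shift := 0, uint(0); i < len(data); i, shift = i+1, shift+7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows a 64-bit integer")
		}
		b := data[i]
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, i + 1, nil
		}
	}
	return 0, 0, errors.New("unexpected end of varint data")
}

func main() {
	// 300 is encoded on the wire as 0xAC 0x02.
	v, n, err := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}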
diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto
index 30f99f64d..fb47a2005 100644
--- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto
@@ -29,6 +29,51 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
// Package-wide variables from generator "generated".
option go_package = "k8s.io/api/admissionregistration/v1beta1";
+// ApplyConfiguration defines the desired configuration values of an object.
+message ApplyConfiguration {
+ // expression will be evaluated by CEL to create an apply configuration.
+ // ref: https://github.com/google/cel-spec
+ //
+ // Apply configurations are declared in CEL using object initialization. For example, this CEL expression
+ // returns an apply configuration to set a single field:
+ //
+ // Object{
+ // spec: Object.spec{
+ // serviceAccountName: "example"
+ // }
+ // }
+ //
+ // Apply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of
+ // values not included in the apply configuration.
+ //
+ // CEL expressions have access to the object types needed to create apply configurations:
+ //
+ // - 'Object' - CEL type of the resource object.
+ // - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec')
+ // - 'Object.<fieldName1>.<fieldName2>' - CEL type of nested field (such as 'Object.spec.containers')
+ //
+ // CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:
+ //
+ // - 'object' - The object from the incoming request. The value is null for DELETE requests.
+ // - 'oldObject' - The existing object. The value is null for CREATE requests.
+ // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
+ // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
+ // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
+ // - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
+ // For example, a variable named 'foo' can be accessed as 'variables.foo'.
+ // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
+ // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
+ // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
+ // request resource.
+ //
+ // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the
+ // object. No other metadata properties are accessible.
+ //
+ // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
+ // Required.
+ optional string expression = 1;
+}
+
// AuditAnnotation describes how to produce an audit annotation for an API request.
message AuditAnnotation {
// key specifies the audit annotation key. The audit annotation keys of
@@ -79,6 +124,75 @@ message ExpressionWarning {
optional string warning = 3;
}
+// JSONPatch defines a JSON Patch.
+message JSONPatch {
+ // expression will be evaluated by CEL to create a [JSON patch](https://jsonpatch.com/).
+ // ref: https://github.com/google/cel-spec
+ //
+ // expression must return an array of JSONPatch values.
+ //
+ // For example, this CEL expression returns a JSON patch to conditionally modify a value:
+ //
+ // [
+ // JSONPatch{op: "test", path: "/spec/example", value: "Red"},
+ // JSONPatch{op: "replace", path: "/spec/example", value: "Green"}
+ // ]
+ //
+ // To define an object for the patch value, use Object types. For example:
+ //
+ // [
+ // JSONPatch{
+ // op: "add",
+ // path: "/spec/selector",
+ // value: Object.spec.selector{matchLabels: {"environment": "test"}}
+ // }
+ // ]
+ //
+ // To use strings containing '/' and '~' as JSONPatch path keys, use "jsonpatch.escapeKey". For example:
+ //
+ // [
+ // JSONPatch{
+ // op: "add",
+ // path: "/metadata/labels/" + jsonpatch.escapeKey("example.com/environment"),
+ // value: "test"
+ // },
+ // ]
+ //
+ // CEL expressions have access to the types needed to create JSON patches and objects:
+ //
+ // - 'JSONPatch' - CEL type of JSON Patch operations. JSONPatch has the fields 'op', 'from', 'path' and 'value'.
+ // See [JSON patch](https://jsonpatch.com/) for more details. The 'value' field may be set to any of: string,
+ // integer, array, map or object. If set, the 'path' and 'from' fields must be set to a
+ // [JSON pointer](https://datatracker.ietf.org/doc/html/rfc6901/) string, where the 'jsonpatch.escapeKey()' CEL
+ // function may be used to escape path keys containing '/' and '~'.
+ // - 'Object' - CEL type of the resource object.
+ // - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec')
+ // - 'Object.<fieldName1>.<fieldName2>' - CEL type of nested field (such as 'Object.spec.containers')
+ //
+ // CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:
+ //
+ // - 'object' - The object from the incoming request. The value is null for DELETE requests.
+ // - 'oldObject' - The existing object. The value is null for CREATE requests.
+ // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
+ // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
+ // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
+ // - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
+ // For example, a variable named 'foo' can be accessed as 'variables.foo'.
+ // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
+ // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
+ // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
+ // request resource.
+ //
+ // CEL expressions have access to [Kubernetes CEL function libraries](https://kubernetes.io/docs/reference/using-api/cel/#cel-options-language-features-and-libraries)
+ // as well as:
+ //
+ // - 'jsonpatch.escapeKey' - Performs JSONPatch key escaping. '~' and '/' are escaped as '~0' and '~1' respectively.
+ //
+ // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
+ // Required.
+ optional string expression = 1;
+}
+
// MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook.
message MatchCondition {
// Name is an identifier for this match condition, used for strategic merging of MatchConditions,
@@ -203,6 +317,173 @@ message MatchResources {
optional string matchPolicy = 7;
}
+// MutatingAdmissionPolicy describes the definition of an admission mutation policy that mutates the object coming into admission chain.
+message MutatingAdmissionPolicy {
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Specification of the desired behavior of the MutatingAdmissionPolicy.
+ optional MutatingAdmissionPolicySpec spec = 2;
+}
+
+// MutatingAdmissionPolicyBinding binds the MutatingAdmissionPolicy with parametrized resources.
+// MutatingAdmissionPolicyBinding and the optional parameter resource together define how cluster administrators
+// configure policies for clusters.
+//
+// For a given admission request, each binding will cause its policy to be
+// evaluated N times, where N is 1 for policies/bindings that don't use
+// params, otherwise N is the number of parameters selected by the binding.
+// Each evaluation is constrained by a [runtime cost budget](https://kubernetes.io/docs/reference/using-api/cel/#runtime-cost-budget).
+//
+// Adding/removing policies, bindings, or params can not affect whether a
+// given (policy, binding, param) combination is within its own CEL budget.
+message MutatingAdmissionPolicyBinding {
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Specification of the desired behavior of the MutatingAdmissionPolicyBinding.
+ optional MutatingAdmissionPolicyBindingSpec spec = 2;
+}
+
+// MutatingAdmissionPolicyBindingList is a list of MutatingAdmissionPolicyBinding.
+message MutatingAdmissionPolicyBindingList {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // List of MutatingAdmissionPolicyBinding.
+ repeated MutatingAdmissionPolicyBinding items = 2;
+}
+
+// MutatingAdmissionPolicyBindingSpec is the specification of the MutatingAdmissionPolicyBinding.
+message MutatingAdmissionPolicyBindingSpec {
+ // policyName references a MutatingAdmissionPolicy name which the MutatingAdmissionPolicyBinding binds to.
+ // If the referenced resource does not exist, this binding is considered invalid and will be ignored.
+ // Required.
+ optional string policyName = 1;
+
+ // paramRef specifies the parameter resource used to configure the admission control policy.
+ // It should point to a resource of the type specified in spec.ParamKind of the bound MutatingAdmissionPolicy.
+ // If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the MutatingAdmissionPolicy is applied.
+ // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.
+ // +optional
+ optional ParamRef paramRef = 2;
+
+ // matchResources limits what resources match this binding and may be mutated by it.
+ // Note that if matchResources matches a resource, the resource must also match a policy's matchConstraints and
+ // matchConditions before the resource may be mutated.
+ // When matchResources is unset, it does not constrain resource matching, and only the policy's matchConstraints
+ // and matchConditions must match for the resource to be mutated.
+ // Additionally, matchResources.resourceRules are optional and do not constrain matching when unset.
+ // Note that this differs from MutatingAdmissionPolicy matchConstraints, where resourceRules are required.
+ // The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched.
+ // '*' matches CREATE, UPDATE and CONNECT.
+ // +optional
+ optional MatchResources matchResources = 3;
+}
+
+// MutatingAdmissionPolicyList is a list of MutatingAdmissionPolicy.
+message MutatingAdmissionPolicyList {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // List of MutatingAdmissionPolicy.
+ repeated MutatingAdmissionPolicy items = 2;
+}
+
+// MutatingAdmissionPolicySpec is the specification of the desired behavior of the admission policy.
+message MutatingAdmissionPolicySpec {
+ // paramKind specifies the kind of resources used to parameterize this policy.
+ // If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions.
+ // If paramKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied.
+ // If paramKind is specified but paramRef is unset in MutatingAdmissionPolicyBinding, the params variable will be null.
+ // +optional
+ optional ParamKind paramKind = 1;
+
+ // matchConstraints specifies what resources this policy is designed to validate.
+ // The MutatingAdmissionPolicy cares about a request if it matches _all_ Constraints.
+ // However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API,
+ // MutatingAdmissionPolicy cannot match MutatingAdmissionPolicy and MutatingAdmissionPolicyBinding.
+ // The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched.
+ // '*' matches CREATE, UPDATE and CONNECT.
+ // Required.
+ optional MatchResources matchConstraints = 2;
+
+ // variables contain definitions of variables that can be used in composition of other expressions.
+ // Each variable is defined as a named CEL expression.
+ // The variables defined here will be available under `variables` in other expressions of the policy
+ // except matchConditions because matchConditions are evaluated before the rest of the policy.
+ //
+ // The expression of a variable can refer to other variables defined earlier in the list but not those after.
+ // Thus, variables must be sorted by the order of first appearance and acyclic.
+ // +listType=atomic
+ // +optional
+ repeated Variable variables = 3;
+
+ // mutations contain operations to perform on matching objects.
+ // mutations may not be empty; a minimum of one mutation is required.
+ // mutations are evaluated in order, and are reinvoked according to
+ // the reinvocationPolicy.
+ // The mutations of a policy are invoked for each binding of this policy
+ // and reinvocation of mutations occurs on a per binding basis.
+ //
+ // +listType=atomic
+ // +optional
+ repeated Mutation mutations = 4;
+
+ // failurePolicy defines how to handle failures for the admission policy. Failures can
+ // occur from CEL expression parse errors, type check errors, runtime errors and invalid
+ // or mis-configured policy definitions or bindings.
+ //
+ // A policy is invalid if paramKind refers to a non-existent Kind.
+ // A binding is invalid if paramRef.name refers to a non-existent resource.
+ //
+ // failurePolicy does not define how validations that evaluate to false are handled.
+ //
+ // Allowed values are Ignore or Fail. Defaults to Fail.
+ // +optional
+ optional string failurePolicy = 5;
+
+ // matchConditions is a list of conditions that must be met for a request to be validated.
+ // Match conditions filter requests that have already been matched by the matchConstraints.
+ // An empty list of matchConditions matches all requests.
+ // There are a maximum of 64 match conditions allowed.
+ //
+ // If a parameter object is provided, it can be accessed via the `params` handle in the same
+ // manner as validation expressions.
+ //
+ // The exact matching logic is (in order):
+ // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.
+ // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.
+ // 3. If any matchCondition evaluates to an error (but none are FALSE):
+ // - If failurePolicy=Fail, reject the request
+ // - If failurePolicy=Ignore, the policy is skipped
+ //
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=name
+ // +optional
+ repeated MatchCondition matchConditions = 6;
+
+ // reinvocationPolicy indicates whether mutations may be called multiple times per MutatingAdmissionPolicyBinding
+ // as part of a single admission evaluation.
+ // Allowed values are "Never" and "IfNeeded".
+ //
+ // Never: These mutations will not be called more than once per binding in a single admission evaluation.
+ //
+ // IfNeeded: These mutations may be invoked more than once per binding for a single admission request and there is no guarantee of
+ // order with respect to other admission plugins, admission webhooks, bindings of this policy and admission policies. Mutations are only
+ // reinvoked when mutations change the object after this mutation is invoked.
+ // Required.
+ optional string reinvocationPolicy = 7;
+}
+
// MutatingWebhook describes an admission webhook and the resources and operations it applies to.
message MutatingWebhook {
// The name of the admission webhook.
@@ -401,6 +682,26 @@ message MutatingWebhookConfigurationList {
repeated MutatingWebhookConfiguration items = 2;
}
+// Mutation specifies the CEL expression which is used to apply the Mutation.
+message Mutation {
+ // patchType indicates the patch strategy used.
+ // Allowed values are "ApplyConfiguration" and "JSONPatch".
+ // Required.
+ //
+ // +unionDiscriminator
+ optional string patchType = 2;
+
+ // applyConfiguration defines the desired configuration values of an object.
+ // The configuration is applied to the admission object using
+ // [structured merge diff](https://github.com/kubernetes-sigs/structured-merge-diff).
+ // A CEL expression is used to create apply configuration.
+ optional ApplyConfiguration applyConfiguration = 3;
+
+ // jsonPatch defines a [JSON patch](https://jsonpatch.com/) operation to perform a mutation to the object.
+ // A CEL expression is used to create the JSON patch.
+ optional JSONPatch jsonPatch = 4;
+}
+
// NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.
// +structType=atomic
message NamedRuleWithOperations {
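For orientation, a minimal sketch of how the Mutation union defined above might be populated from Go, assuming the v1beta1 Go types added later in this diff (import path k8s.io/api/admissionregistration/v1beta1); exactly one union member is set, matching the patchType discriminator, and the CEL expressions are taken from the doc comments above:

package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

func main() {
	// ApplyConfiguration-style mutation: the CEL expression returns an apply configuration.
	applyMutation := admissionregistrationv1beta1.Mutation{
		PatchType: admissionregistrationv1beta1.PatchTypeApplyConfiguration,
		ApplyConfiguration: &admissionregistrationv1beta1.ApplyConfiguration{
			Expression: `Object{spec: Object.spec{serviceAccountName: "example"}}`,
		},
	}

	// JSONPatch-style mutation: the CEL expression returns a list of JSONPatch operations.
	jsonPatchMutation := admissionregistrationv1beta1.Mutation{
		PatchType: admissionregistrationv1beta1.PatchTypeJSONPatch,
		JSONPatch: &admissionregistrationv1beta1.JSONPatch{
			Expression: `[JSONPatch{op: "add", path: "/spec/selector", value: Object.spec.selector{matchLabels: {"environment": "test"}}}]`,
		},
	}

	fmt.Println(applyMutation.PatchType, jsonPatchMutation.PatchType)
}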
diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/register.go b/vendor/k8s.io/api/admissionregistration/v1beta1/register.go
index 363233a2f..be64c4a5f 100644
--- a/vendor/k8s.io/api/admissionregistration/v1beta1/register.go
+++ b/vendor/k8s.io/api/admissionregistration/v1beta1/register.go
@@ -54,6 +54,10 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&ValidatingAdmissionPolicyList{},
&ValidatingAdmissionPolicyBinding{},
&ValidatingAdmissionPolicyBindingList{},
+ &MutatingAdmissionPolicy{},
+ &MutatingAdmissionPolicyList{},
+ &MutatingAdmissionPolicyBinding{},
+ &MutatingAdmissionPolicyBindingList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
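A minimal usage sketch of the registration change above, assuming the standard helpers this package already exports (AddToScheme, SchemeGroupVersion): once the new kinds are added to addKnownTypes, a runtime.Scheme built from this package recognizes MutatingAdmissionPolicy alongside the existing validating types.

package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	if err := admissionregistrationv1beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// With the addKnownTypes change above, the scheme now recognizes the
	// MutatingAdmissionPolicy kinds in this group/version.
	gvk := admissionregistrationv1beta1.SchemeGroupVersion.WithKind("MutatingAdmissionPolicy")
	fmt.Println(scheme.Recognizes(gvk)) // expected: true
}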
diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go
index 0f5903123..cffdda82c 100644
--- a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go
+++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go
@@ -1073,7 +1073,7 @@ type MutatingWebhook struct {
}
// ReinvocationPolicyType specifies what type of policy the admission hook uses.
-type ReinvocationPolicyType string
+type ReinvocationPolicyType = v1.ReinvocationPolicyType
const (
// NeverReinvocationPolicy indicates that the webhook must not be called more than once in a
@@ -1197,3 +1197,332 @@ type MatchCondition struct {
// Required.
Expression string `json:"expression" protobuf:"bytes,2,opt,name=expression"`
}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.34
+
+// MutatingAdmissionPolicy describes the definition of an admission mutation policy that mutates the object coming into admission chain.
+type MutatingAdmissionPolicy struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Specification of the desired behavior of the MutatingAdmissionPolicy.
+ Spec MutatingAdmissionPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.34
+
+// MutatingAdmissionPolicyList is a list of MutatingAdmissionPolicy.
+type MutatingAdmissionPolicyList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // List of MutatingAdmissionPolicy.
+ Items []MutatingAdmissionPolicy `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// MutatingAdmissionPolicySpec is the specification of the desired behavior of the admission policy.
+type MutatingAdmissionPolicySpec struct {
+ // paramKind specifies the kind of resources used to parameterize this policy.
+ // If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions.
+ // If paramKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied.
+ // If paramKind is specified but paramRef is unset in MutatingAdmissionPolicyBinding, the params variable will be null.
+ // +optional
+ ParamKind *ParamKind `json:"paramKind,omitempty" protobuf:"bytes,1,rep,name=paramKind"`
+
+ // matchConstraints specifies what resources this policy is designed to validate.
+ // The MutatingAdmissionPolicy cares about a request if it matches _all_ Constraints.
+ // However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API,
+ // MutatingAdmissionPolicy cannot match MutatingAdmissionPolicy and MutatingAdmissionPolicyBinding.
+ // The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched.
+ // '*' matches CREATE, UPDATE and CONNECT.
+ // Required.
+ MatchConstraints *MatchResources `json:"matchConstraints,omitempty" protobuf:"bytes,2,rep,name=matchConstraints"`
+
+ // variables contain definitions of variables that can be used in composition of other expressions.
+ // Each variable is defined as a named CEL expression.
+ // The variables defined here will be available under `variables` in other expressions of the policy
+ // except matchConditions because matchConditions are evaluated before the rest of the policy.
+ //
+ // The expression of a variable can refer to other variables defined earlier in the list but not those after.
+ // Thus, variables must be sorted by the order of first appearance and acyclic.
+ // +listType=atomic
+ // +optional
+ Variables []Variable `json:"variables,omitempty" protobuf:"bytes,3,rep,name=variables"`
+
+ // mutations contain operations to perform on matching objects.
+ // mutations may not be empty; a minimum of one mutation is required.
+ // mutations are evaluated in order, and are reinvoked according to
+ // the reinvocationPolicy.
+ // The mutations of a policy are invoked for each binding of this policy
+ // and reinvocation of mutations occurs on a per binding basis.
+ //
+ // +listType=atomic
+ // +optional
+ Mutations []Mutation `json:"mutations,omitempty" protobuf:"bytes,4,rep,name=mutations"`
+
+ // failurePolicy defines how to handle failures for the admission policy. Failures can
+ // occur from CEL expression parse errors, type check errors, runtime errors and invalid
+ // or mis-configured policy definitions or bindings.
+ //
+ // A policy is invalid if paramKind refers to a non-existent Kind.
+ // A binding is invalid if paramRef.name refers to a non-existent resource.
+ //
+ // failurePolicy does not define how validations that evaluate to false are handled.
+ //
+ // Allowed values are Ignore or Fail. Defaults to Fail.
+ // +optional
+ FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,5,opt,name=failurePolicy,casttype=FailurePolicyType"`
+
+ // matchConditions is a list of conditions that must be met for a request to be validated.
+ // Match conditions filter requests that have already been matched by the matchConstraints.
+ // An empty list of matchConditions matches all requests.
+ // There are a maximum of 64 match conditions allowed.
+ //
+ // If a parameter object is provided, it can be accessed via the `params` handle in the same
+ // manner as validation expressions.
+ //
+ // The exact matching logic is (in order):
+ // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.
+ // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.
+ // 3. If any matchCondition evaluates to an error (but none are FALSE):
+ // - If failurePolicy=Fail, reject the request
+ // - If failurePolicy=Ignore, the policy is skipped
+ //
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=name
+ // +optional
+ MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,6,rep,name=matchConditions"`
+
+ // reinvocationPolicy indicates whether mutations may be called multiple times per MutatingAdmissionPolicyBinding
+ // as part of a single admission evaluation.
+ // Allowed values are "Never" and "IfNeeded".
+ //
+ // Never: These mutations will not be called more than once per binding in a single admission evaluation.
+ //
+ // IfNeeded: These mutations may be invoked more than once per binding for a single admission request and there is no guarantee of
+ // order with respect to other admission plugins, admission webhooks, bindings of this policy and admission policies. Mutations are only
+ // reinvoked when mutations change the object after this mutation is invoked.
+ // Required.
+ ReinvocationPolicy ReinvocationPolicyType `json:"reinvocationPolicy,omitempty" protobuf:"bytes,7,opt,name=reinvocationPolicy,casttype=ReinvocationPolicyType"`
+}
+
+// Mutation specifies the CEL expression which is used to apply the Mutation.
+type Mutation struct {
+ // patchType indicates the patch strategy used.
+ // Allowed values are "ApplyConfiguration" and "JSONPatch".
+ // Required.
+ //
+ // +unionDiscriminator
+ PatchType PatchType `json:"patchType" protobuf:"bytes,2,opt,name=patchType,casttype=PatchType"`
+
+ // applyConfiguration defines the desired configuration values of an object.
+ // The configuration is applied to the admission object using
+ // [structured merge diff](https://github.com/kubernetes-sigs/structured-merge-diff).
+ // A CEL expression is used to create apply configuration.
+ ApplyConfiguration *ApplyConfiguration `json:"applyConfiguration,omitempty" protobuf:"bytes,3,opt,name=applyConfiguration"`
+
+ // jsonPatch defines a [JSON patch](https://jsonpatch.com/) operation to perform a mutation to the object.
+ // A CEL expression is used to create the JSON patch.
+ JSONPatch *JSONPatch `json:"jsonPatch,omitempty" protobuf:"bytes,4,opt,name=jsonPatch"`
+}
+
+// PatchType specifies the type of patch operation for a mutation.
+// +enum
+type PatchType string
+
+const (
+ // ApplyConfiguration indicates that the mutation is using apply configuration to mutate the object.
+ PatchTypeApplyConfiguration PatchType = "ApplyConfiguration"
+ // JSONPatch indicates that the object is mutated through JSON Patch.
+ PatchTypeJSONPatch PatchType = "JSONPatch"
+)
+
+// ApplyConfiguration defines the desired configuration values of an object.
+type ApplyConfiguration struct {
+ // expression will be evaluated by CEL to create an apply configuration.
+ // ref: https://github.com/google/cel-spec
+ //
+ // Apply configurations are declared in CEL using object initialization. For example, this CEL expression
+ // returns an apply configuration to set a single field:
+ //
+ // Object{
+ // spec: Object.spec{
+ // serviceAccountName: "example"
+ // }
+ // }
+ //
+ // Apply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of
+ // values not included in the apply configuration.
+ //
+ // CEL expressions have access to the object types needed to create apply configurations:
+ //
+ // - 'Object' - CEL type of the resource object.
+ // - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec')
+ // - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>' - CEL type of nested field (such as 'Object.spec.containers')
+ //
+ // CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:
+ //
+ // - 'object' - The object from the incoming request. The value is null for DELETE requests.
+ // - 'oldObject' - The existing object. The value is null for CREATE requests.
+ // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
+ // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
+ // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
+ // - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
+ // For example, a variable named 'foo' can be accessed as 'variables.foo'.
+ // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
+ // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
+ // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
+ // request resource.
+ //
+ // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the
+ // object. No other metadata properties are accessible.
+ //
+ // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
+ // Required.
+ Expression string `json:"expression,omitempty" protobuf:"bytes,1,opt,name=expression"`
+}
+
+// JSONPatch defines a JSON Patch.
+type JSONPatch struct {
+ // expression will be evaluated by CEL to create a [JSON patch](https://jsonpatch.com/).
+ // ref: https://github.com/google/cel-spec
+ //
+ // expression must return an array of JSONPatch values.
+ //
+ // For example, this CEL expression returns a JSON patch to conditionally modify a value:
+ //
+ // [
+ // JSONPatch{op: "test", path: "/spec/example", value: "Red"},
+ // JSONPatch{op: "replace", path: "/spec/example", value: "Green"}
+ // ]
+ //
+ // To define an object for the patch value, use Object types. For example:
+ //
+ // [
+ // JSONPatch{
+ // op: "add",
+ // path: "/spec/selector",
+ // value: Object.spec.selector{matchLabels: {"environment": "test"}}
+ // }
+ // ]
+ //
+ // To use strings containing '/' and '~' as JSONPatch path keys, use "jsonpatch.escapeKey". For example:
+ //
+ // [
+ // JSONPatch{
+ // op: "add",
+ // path: "/metadata/labels/" + jsonpatch.escapeKey("example.com/environment"),
+ // value: "test"
+ // },
+ // ]
+ //
+ // CEL expressions have access to the types needed to create JSON patches and objects:
+ //
+ // - 'JSONPatch' - CEL type of JSON Patch operations. JSONPatch has the fields 'op', 'from', 'path' and 'value'.
+ // See [JSON patch](https://jsonpatch.com/) for more details. The 'value' field may be set to any of: string,
+ // integer, array, map or object. If set, the 'path' and 'from' fields must be set to a
+ // [JSON pointer](https://datatracker.ietf.org/doc/html/rfc6901/) string, where the 'jsonpatch.escapeKey()' CEL
+ // function may be used to escape path keys containing '/' and '~'.
+ // - 'Object' - CEL type of the resource object.
+ // - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec')
+ // - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>' - CEL type of nested field (such as 'Object.spec.containers')
+ //
+ // CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:
+ //
+ // - 'object' - The object from the incoming request. The value is null for DELETE requests.
+ // - 'oldObject' - The existing object. The value is null for CREATE requests.
+ // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
+ // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
+ // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
+ // - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
+ // For example, a variable named 'foo' can be accessed as 'variables.foo'.
+ // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
+ // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
+ // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
+ // request resource.
+ //
+ // CEL expressions have access to [Kubernetes CEL function libraries](https://kubernetes.io/docs/reference/using-api/cel/#cel-options-language-features-and-libraries)
+ // as well as:
+ //
+ // - 'jsonpatch.escapeKey' - Performs JSONPatch key escaping. '~' and '/' are escaped as '~0' and '~1' respectively.
+ //
+ //
+ // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
+ // Required.
+ Expression string `json:"expression,omitempty" protobuf:"bytes,1,opt,name=expression"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.34
+
+// MutatingAdmissionPolicyBinding binds the MutatingAdmissionPolicy with parametrized resources.
+// MutatingAdmissionPolicyBinding and the optional parameter resource together define how cluster administrators
+// configure policies for clusters.
+//
+// For a given admission request, each binding will cause its policy to be
+// evaluated N times, where N is 1 for policies/bindings that don't use
+// params, otherwise N is the number of parameters selected by the binding.
+// Each evaluation is constrained by a [runtime cost budget](https://kubernetes.io/docs/reference/using-api/cel/#runtime-cost-budget).
+//
+// Adding/removing policies, bindings, or params can not affect whether a
+// given (policy, binding, param) combination is within its own CEL budget.
+type MutatingAdmissionPolicyBinding struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Specification of the desired behavior of the MutatingAdmissionPolicyBinding.
+ Spec MutatingAdmissionPolicyBindingSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.34
+
+// MutatingAdmissionPolicyBindingList is a list of MutatingAdmissionPolicyBinding.
+type MutatingAdmissionPolicyBindingList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // List of PolicyBinding.
+ Items []MutatingAdmissionPolicyBinding `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// MutatingAdmissionPolicyBindingSpec is the specification of the MutatingAdmissionPolicyBinding.
+type MutatingAdmissionPolicyBindingSpec struct {
+ // policyName references a MutatingAdmissionPolicy name which the MutatingAdmissionPolicyBinding binds to.
+ // If the referenced resource does not exist, this binding is considered invalid and will be ignored.
+ // Required.
+ PolicyName string `json:"policyName,omitempty" protobuf:"bytes,1,rep,name=policyName"`
+
+ // paramRef specifies the parameter resource used to configure the admission control policy.
+ // It should point to a resource of the type specified in spec.ParamKind of the bound MutatingAdmissionPolicy.
+ // If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the MutatingAdmissionPolicy is applied.
+ // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.
+ // +optional
+ ParamRef *ParamRef `json:"paramRef,omitempty" protobuf:"bytes,2,rep,name=paramRef"`
+
+ // matchResources limits what resources match this binding and may be mutated by it.
+ // Note that if matchResources matches a resource, the resource must also match a policy's matchConstraints and
+ // matchConditions before the resource may be mutated.
+ // When matchResources is unset, it does not constrain resource matching, and only the policy's matchConstraints
+ // and matchConditions must match for the resource to be mutated.
+ // Additionally, matchResources.resourceRules are optional and do not constrain matching when unset.
+ // Note that this differs from MutatingAdmissionPolicy matchConstraints, where resourceRules are required.
+ // The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched.
+ // '*' matches CREATE, UPDATE and CONNECT.
+ // +optional
+ MatchResources *MatchResources `json:"matchResources,omitempty" protobuf:"bytes,3,rep,name=matchResources"`
+}
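Taken together, the new v1beta1 types above compose into a policy plus a binding that references it. The sketch below is illustrative only: the object names, the match condition, and the CEL expressions are assumptions, MatchConstraints (required on a real policy) is omitted for brevity, and only fields and constants defined in this package are used.

package example

import (
	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// examplePolicyAndBinding wires a MutatingAdmissionPolicy carrying one
// ApplyConfiguration mutation and one JSONPatch mutation to a binding that
// references it by name. MatchConstraints (required on a real policy) is
// omitted here for brevity.
func examplePolicyAndBinding() (*admissionregistrationv1beta1.MutatingAdmissionPolicy, *admissionregistrationv1beta1.MutatingAdmissionPolicyBinding) {
	failurePolicy := admissionregistrationv1beta1.Fail
	policy := &admissionregistrationv1beta1.MutatingAdmissionPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "set-service-account"},
		Spec: admissionregistrationv1beta1.MutatingAdmissionPolicySpec{
			FailurePolicy:      &failurePolicy,
			ReinvocationPolicy: admissionregistrationv1beta1.ReinvocationPolicyType("IfNeeded"),
			// Skip the policy entirely for requests in kube-system.
			MatchConditions: []admissionregistrationv1beta1.MatchCondition{{
				Name:       "exclude-kube-system",
				Expression: `object.metadata.namespace != "kube-system"`,
			}},
			Mutations: []admissionregistrationv1beta1.Mutation{
				{
					// Apply-configuration style: declare the desired field values.
					PatchType: admissionregistrationv1beta1.PatchTypeApplyConfiguration,
					ApplyConfiguration: &admissionregistrationv1beta1.ApplyConfiguration{
						Expression: `Object{spec: Object.spec{serviceAccountName: "example"}}`,
					},
				},
				{
					// JSONPatch style: emit explicit patch operations.
					PatchType: admissionregistrationv1beta1.PatchTypeJSONPatch,
					JSONPatch: &admissionregistrationv1beta1.JSONPatch{
						Expression: `[JSONPatch{op: "add", path: "/metadata/labels/" + jsonpatch.escapeKey("example.com/mutated"), value: "true"}]`,
					},
				},
			},
		},
	}
	binding := &admissionregistrationv1beta1.MutatingAdmissionPolicyBinding{
		ObjectMeta: metav1.ObjectMeta{Name: "set-service-account-binding"},
		Spec: admissionregistrationv1beta1.MutatingAdmissionPolicyBindingSpec{
			PolicyName: policy.Name,
			// ParamRef and MatchResources stay nil: the policy declares no ParamKind,
			// and an unset matchResources adds no extra matching constraint.
		},
	}
	return policy, binding
}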
diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go
index cc1509b53..1a97c9472 100644
--- a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go
@@ -27,6 +27,15 @@ package v1beta1
// Those methods can be generated by using hack/update-codegen.sh
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_ApplyConfiguration = map[string]string{
+ "": "ApplyConfiguration defines the desired configuration values of an object.",
+ "expression": "expression will be evaluated by CEL to create an apply configuration. ref: https://github.com/google/cel-spec\n\nApply configurations are declared in CEL using object initialization. For example, this CEL expression returns an apply configuration to set a single field:\n\n\tObject{\n\t spec: Object.spec{\n\t serviceAccountName: \"example\"\n\t }\n\t}\n\nApply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of values not included in the apply configuration.\n\nCEL expressions have access to the object types needed to create apply configurations:\n\n- 'Object' - CEL type of the resource object. - 'Object.' - CEL type of object field (such as 'Object.spec') - 'Object.....` - CEL type of nested field (such as 'Object.spec.containers')\n\nCEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\n For example, a variable named 'foo' can be accessed as 'variables.foo'.\n- 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\n\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible.\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Required.",
+}
+
+func (ApplyConfiguration) SwaggerDoc() map[string]string {
+ return map_ApplyConfiguration
+}
+
var map_AuditAnnotation = map[string]string{
"": "AuditAnnotation describes how to produce an audit annotation for an API request.",
"key": "key specifies the audit annotation key. The audit annotation keys of a ValidatingAdmissionPolicy must be unique. The key must be a qualified name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.\n\nThe key is combined with the resource name of the ValidatingAdmissionPolicy to construct an audit annotation key: \"{ValidatingAdmissionPolicy name}/{key}\".\n\nIf an admission webhook uses the same resource name as this ValidatingAdmissionPolicy and the same audit annotation key, the annotation key will be identical. In this case, the first annotation written with the key will be included in the audit event and all subsequent annotations with the same key will be discarded.\n\nRequired.",
@@ -47,6 +56,15 @@ func (ExpressionWarning) SwaggerDoc() map[string]string {
return map_ExpressionWarning
}
+var map_JSONPatch = map[string]string{
+ "": "JSONPatch defines a JSON Patch.",
+ "expression": "expression will be evaluated by CEL to create a [JSON patch](https://jsonpatch.com/). ref: https://github.com/google/cel-spec\n\nexpression must return an array of JSONPatch values.\n\nFor example, this CEL expression returns a JSON patch to conditionally modify a value:\n\n\t [\n\t JSONPatch{op: \"test\", path: \"/spec/example\", value: \"Red\"},\n\t JSONPatch{op: \"replace\", path: \"/spec/example\", value: \"Green\"}\n\t ]\n\nTo define an object for the patch value, use Object types. For example:\n\n\t [\n\t JSONPatch{\n\t op: \"add\",\n\t path: \"/spec/selector\",\n\t value: Object.spec.selector{matchLabels: {\"environment\": \"test\"}}\n\t }\n\t ]\n\nTo use strings containing '/' and '~' as JSONPatch path keys, use \"jsonpatch.escapeKey\". For example:\n\n\t [\n\t JSONPatch{\n\t op: \"add\",\n\t path: \"/metadata/labels/\" + jsonpatch.escapeKey(\"example.com/environment\"),\n\t value: \"test\"\n\t },\n\t ]\n\nCEL expressions have access to the types needed to create JSON patches and objects:\n\n- 'JSONPatch' - CEL type of JSON Patch operations. JSONPatch has the fields 'op', 'from', 'path' and 'value'.\n See [JSON patch](https://jsonpatch.com/) for more details. The 'value' field may be set to any of: string,\n integer, array, map or object. If set, the 'path' and 'from' fields must be set to a\n [JSON pointer](https://datatracker.ietf.org/doc/html/rfc6901/) string, where the 'jsonpatch.escapeKey()' CEL\n function may be used to escape path keys containing '/' and '~'.\n- 'Object' - CEL type of the resource object. - 'Object.' - CEL type of object field (such as 'Object.spec') - 'Object.....` - CEL type of nested field (such as 'Object.spec.containers')\n\nCEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\n For example, a variable named 'foo' can be accessed as 'variables.foo'.\n- 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\n\nCEL expressions have access to [Kubernetes CEL function libraries](https://kubernetes.io/docs/reference/using-api/cel/#cel-options-language-features-and-libraries) as well as:\n\n- 'jsonpatch.escapeKey' - Performs JSONPatch key escaping. '~' and '/' are escaped as '~0' and `~1' respectively).\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Required.",
+}
+
+func (JSONPatch) SwaggerDoc() map[string]string {
+ return map_JSONPatch
+}
+
var map_MatchCondition = map[string]string{
"": "MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook.",
"name": "Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')\n\nRequired.",
@@ -70,6 +88,72 @@ func (MatchResources) SwaggerDoc() map[string]string {
return map_MatchResources
}
+var map_MutatingAdmissionPolicy = map[string]string{
+ "": "MutatingAdmissionPolicy describes the definition of an admission mutation policy that mutates the object coming into admission chain.",
+ "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.",
+ "spec": "Specification of the desired behavior of the MutatingAdmissionPolicy.",
+}
+
+func (MutatingAdmissionPolicy) SwaggerDoc() map[string]string {
+ return map_MutatingAdmissionPolicy
+}
+
+var map_MutatingAdmissionPolicyBinding = map[string]string{
+ "": "MutatingAdmissionPolicyBinding binds the MutatingAdmissionPolicy with parametrized resources. MutatingAdmissionPolicyBinding and the optional parameter resource together define how cluster administrators configure policies for clusters.\n\nFor a given admission request, each binding will cause its policy to be evaluated N times, where N is 1 for policies/bindings that don't use params, otherwise N is the number of parameters selected by the binding. Each evaluation is constrained by a [runtime cost budget](https://kubernetes.io/docs/reference/using-api/cel/#runtime-cost-budget).\n\nAdding/removing policies, bindings, or params can not affect whether a given (policy, binding, param) combination is within its own CEL budget.",
+ "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.",
+ "spec": "Specification of the desired behavior of the MutatingAdmissionPolicyBinding.",
+}
+
+func (MutatingAdmissionPolicyBinding) SwaggerDoc() map[string]string {
+ return map_MutatingAdmissionPolicyBinding
+}
+
+var map_MutatingAdmissionPolicyBindingList = map[string]string{
+ "": "MutatingAdmissionPolicyBindingList is a list of MutatingAdmissionPolicyBinding.",
+ "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "items": "List of PolicyBinding.",
+}
+
+func (MutatingAdmissionPolicyBindingList) SwaggerDoc() map[string]string {
+ return map_MutatingAdmissionPolicyBindingList
+}
+
+var map_MutatingAdmissionPolicyBindingSpec = map[string]string{
+ "": "MutatingAdmissionPolicyBindingSpec is the specification of the MutatingAdmissionPolicyBinding.",
+ "policyName": "policyName references a MutatingAdmissionPolicy name which the MutatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required.",
+ "paramRef": "paramRef specifies the parameter resource used to configure the admission control policy. It should point to a resource of the type specified in spec.ParamKind of the bound MutatingAdmissionPolicy. If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the MutatingAdmissionPolicy applied. If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.",
+ "matchResources": "matchResources limits what resources match this binding and may be mutated by it. Note that if matchResources matches a resource, the resource must also match a policy's matchConstraints and matchConditions before the resource may be mutated. When matchResources is unset, it does not constrain resource matching, and only the policy's matchConstraints and matchConditions must match for the resource to be mutated. Additionally, matchResources.resourceRules are optional and do not constraint matching when unset. Note that this is differs from MutatingAdmissionPolicy matchConstraints, where resourceRules are required. The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched. '*' matches CREATE, UPDATE and CONNECT.",
+}
+
+func (MutatingAdmissionPolicyBindingSpec) SwaggerDoc() map[string]string {
+ return map_MutatingAdmissionPolicyBindingSpec
+}
+
+var map_MutatingAdmissionPolicyList = map[string]string{
+ "": "MutatingAdmissionPolicyList is a list of MutatingAdmissionPolicy.",
+ "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "items": "List of ValidatingAdmissionPolicy.",
+}
+
+func (MutatingAdmissionPolicyList) SwaggerDoc() map[string]string {
+ return map_MutatingAdmissionPolicyList
+}
+
+var map_MutatingAdmissionPolicySpec = map[string]string{
+ "": "MutatingAdmissionPolicySpec is the specification of the desired behavior of the admission policy.",
+ "paramKind": "paramKind specifies the kind of resources used to parameterize this policy. If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. If paramKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. If paramKind is specified but paramRef is unset in MutatingAdmissionPolicyBinding, the params variable will be null.",
+ "matchConstraints": "matchConstraints specifies what resources this policy is designed to validate. The MutatingAdmissionPolicy cares about a request if it matches _all_ Constraints. However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API MutatingAdmissionPolicy cannot match MutatingAdmissionPolicy and MutatingAdmissionPolicyBinding. The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched. '*' matches CREATE, UPDATE and CONNECT. Required.",
+ "variables": "variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except matchConditions because matchConditions are evaluated before the rest of the policy.\n\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, variables must be sorted by the order of first appearance and acyclic.",
+ "mutations": "mutations contain operations to perform on matching objects. mutations may not be empty; a minimum of one mutation is required. mutations are evaluated in order, and are reinvoked according to the reinvocationPolicy. The mutations of a policy are invoked for each binding of this policy and reinvocation of mutations occurs on a per binding basis.",
+ "failurePolicy": "failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\n\nA policy is invalid if paramKind refers to a non-existent Kind. A binding is invalid if paramRef.name refers to a non-existent resource.\n\nfailurePolicy does not define how validations that evaluate to false are handled.\n\nAllowed values are Ignore or Fail. Defaults to Fail.",
+ "matchConditions": "matchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the matchConstraints. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the policy is skipped",
+ "reinvocationPolicy": "reinvocationPolicy indicates whether mutations may be called multiple times per MutatingAdmissionPolicyBinding as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: These mutations will not be called more than once per binding in a single admission evaluation.\n\nIfNeeded: These mutations may be invoked more than once per binding for a single admission request and there is no guarantee of order with respect to other admission plugins, admission webhooks, bindings of this policy and admission policies. Mutations are only reinvoked when mutations change the object after this mutation is invoked. Required.",
+}
+
+func (MutatingAdmissionPolicySpec) SwaggerDoc() map[string]string {
+ return map_MutatingAdmissionPolicySpec
+}
+
var map_MutatingWebhook = map[string]string{
"": "MutatingWebhook describes an admission webhook and the resources and operations it applies to.",
"name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.",
@@ -110,6 +194,17 @@ func (MutatingWebhookConfigurationList) SwaggerDoc() map[string]string {
return map_MutatingWebhookConfigurationList
}
+var map_Mutation = map[string]string{
+ "": "Mutation specifies the CEL expression which is used to apply the Mutation.",
+ "patchType": "patchType indicates the patch strategy used. Allowed values are \"ApplyConfiguration\" and \"JSONPatch\". Required.",
+ "applyConfiguration": "applyConfiguration defines the desired configuration values of an object. The configuration is applied to the admission object using [structured merge diff](https://github.com/kubernetes-sigs/structured-merge-diff). A CEL expression is used to create apply configuration.",
+ "jsonPatch": "jsonPatch defines a [JSON patch](https://jsonpatch.com/) operation to perform a mutation to the object. A CEL expression is used to create the JSON patch.",
+}
+
+func (Mutation) SwaggerDoc() map[string]string {
+ return map_Mutation
+}
+
var map_NamedRuleWithOperations = map[string]string{
"": "NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.",
"resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.",
diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go
index 4c10b1d11..3749a3d14 100644
--- a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go
@@ -27,6 +27,22 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ApplyConfiguration) DeepCopyInto(out *ApplyConfiguration) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplyConfiguration.
+func (in *ApplyConfiguration) DeepCopy() *ApplyConfiguration {
+ if in == nil {
+ return nil
+ }
+ out := new(ApplyConfiguration)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AuditAnnotation) DeepCopyInto(out *AuditAnnotation) {
*out = *in
@@ -59,6 +75,22 @@ func (in *ExpressionWarning) DeepCopy() *ExpressionWarning {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JSONPatch) DeepCopyInto(out *JSONPatch) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONPatch.
+func (in *JSONPatch) DeepCopy() *JSONPatch {
+ if in == nil {
+ return nil
+ }
+ out := new(JSONPatch)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MatchCondition) DeepCopyInto(out *MatchCondition) {
*out = *in
@@ -120,6 +152,200 @@ func (in *MatchResources) DeepCopy() *MatchResources {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MutatingAdmissionPolicy) DeepCopyInto(out *MutatingAdmissionPolicy) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicy.
+func (in *MutatingAdmissionPolicy) DeepCopy() *MutatingAdmissionPolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(MutatingAdmissionPolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MutatingAdmissionPolicy) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MutatingAdmissionPolicyBinding) DeepCopyInto(out *MutatingAdmissionPolicyBinding) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyBinding.
+func (in *MutatingAdmissionPolicyBinding) DeepCopy() *MutatingAdmissionPolicyBinding {
+ if in == nil {
+ return nil
+ }
+ out := new(MutatingAdmissionPolicyBinding)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MutatingAdmissionPolicyBinding) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MutatingAdmissionPolicyBindingList) DeepCopyInto(out *MutatingAdmissionPolicyBindingList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]MutatingAdmissionPolicyBinding, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyBindingList.
+func (in *MutatingAdmissionPolicyBindingList) DeepCopy() *MutatingAdmissionPolicyBindingList {
+ if in == nil {
+ return nil
+ }
+ out := new(MutatingAdmissionPolicyBindingList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MutatingAdmissionPolicyBindingList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MutatingAdmissionPolicyBindingSpec) DeepCopyInto(out *MutatingAdmissionPolicyBindingSpec) {
+ *out = *in
+ if in.ParamRef != nil {
+ in, out := &in.ParamRef, &out.ParamRef
+ *out = new(ParamRef)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.MatchResources != nil {
+ in, out := &in.MatchResources, &out.MatchResources
+ *out = new(MatchResources)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyBindingSpec.
+func (in *MutatingAdmissionPolicyBindingSpec) DeepCopy() *MutatingAdmissionPolicyBindingSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(MutatingAdmissionPolicyBindingSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MutatingAdmissionPolicyList) DeepCopyInto(out *MutatingAdmissionPolicyList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]MutatingAdmissionPolicy, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyList.
+func (in *MutatingAdmissionPolicyList) DeepCopy() *MutatingAdmissionPolicyList {
+ if in == nil {
+ return nil
+ }
+ out := new(MutatingAdmissionPolicyList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MutatingAdmissionPolicyList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MutatingAdmissionPolicySpec) DeepCopyInto(out *MutatingAdmissionPolicySpec) {
+ *out = *in
+ if in.ParamKind != nil {
+ in, out := &in.ParamKind, &out.ParamKind
+ *out = new(ParamKind)
+ **out = **in
+ }
+ if in.MatchConstraints != nil {
+ in, out := &in.MatchConstraints, &out.MatchConstraints
+ *out = new(MatchResources)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Variables != nil {
+ in, out := &in.Variables, &out.Variables
+ *out = make([]Variable, len(*in))
+ copy(*out, *in)
+ }
+ if in.Mutations != nil {
+ in, out := &in.Mutations, &out.Mutations
+ *out = make([]Mutation, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.FailurePolicy != nil {
+ in, out := &in.FailurePolicy, &out.FailurePolicy
+ *out = new(FailurePolicyType)
+ **out = **in
+ }
+ if in.MatchConditions != nil {
+ in, out := &in.MatchConditions, &out.MatchConditions
+ *out = make([]MatchCondition, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicySpec.
+func (in *MutatingAdmissionPolicySpec) DeepCopy() *MutatingAdmissionPolicySpec {
+ if in == nil {
+ return nil
+ }
+ out := new(MutatingAdmissionPolicySpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) {
*out = *in
@@ -168,7 +394,7 @@ func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) {
}
if in.ReinvocationPolicy != nil {
in, out := &in.ReinvocationPolicy, &out.ReinvocationPolicy
- *out = new(ReinvocationPolicyType)
+ *out = new(admissionregistrationv1.ReinvocationPolicyType)
**out = **in
}
if in.MatchConditions != nil {
@@ -255,6 +481,32 @@ func (in *MutatingWebhookConfigurationList) DeepCopyObject() runtime.Object {
return nil
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Mutation) DeepCopyInto(out *Mutation) {
+ *out = *in
+ if in.ApplyConfiguration != nil {
+ in, out := &in.ApplyConfiguration, &out.ApplyConfiguration
+ *out = new(ApplyConfiguration)
+ **out = **in
+ }
+ if in.JSONPatch != nil {
+ in, out := &in.JSONPatch, &out.JSONPatch
+ *out = new(JSONPatch)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mutation.
+func (in *Mutation) DeepCopy() *Mutation {
+ if in == nil {
+ return nil
+ }
+ out := new(Mutation)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamedRuleWithOperations) DeepCopyInto(out *NamedRuleWithOperations) {
*out = *in
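The generated DeepCopy helpers return fully independent values, including pointer-typed members such as Mutation.ApplyConfiguration. A minimal sketch under that reading; the function name is illustrative:

package example

import (
	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

// copyIsIndependent shows that editing a deep copy of a policy spec, including
// the pointer-typed ApplyConfiguration of its first mutation, never leaks back
// into the original spec.
func copyIsIndependent(spec *admissionregistrationv1beta1.MutatingAdmissionPolicySpec) bool {
	copied := spec.DeepCopy()
	if len(copied.Mutations) == 0 || copied.Mutations[0].ApplyConfiguration == nil {
		return true // nothing to mutate; trivially independent
	}
	copied.Mutations[0].ApplyConfiguration.Expression += " " // edit only the copy
	return spec.Mutations[0].ApplyConfiguration.Expression != copied.Mutations[0].ApplyConfiguration.Expression
}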
diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go
index c1be5122a..4fc0596b3 100644
--- a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go
+++ b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go
@@ -25,6 +25,78 @@ import (
schema "k8s.io/apimachinery/pkg/runtime/schema"
)
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *MutatingAdmissionPolicy) APILifecycleIntroduced() (major, minor int) {
+ return 1, 34
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *MutatingAdmissionPolicy) APILifecycleDeprecated() (major, minor int) {
+ return 1, 37
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *MutatingAdmissionPolicy) APILifecycleRemoved() (major, minor int) {
+ return 1, 40
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *MutatingAdmissionPolicyBinding) APILifecycleIntroduced() (major, minor int) {
+ return 1, 34
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *MutatingAdmissionPolicyBinding) APILifecycleDeprecated() (major, minor int) {
+ return 1, 37
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *MutatingAdmissionPolicyBinding) APILifecycleRemoved() (major, minor int) {
+ return 1, 40
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *MutatingAdmissionPolicyBindingList) APILifecycleIntroduced() (major, minor int) {
+ return 1, 34
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *MutatingAdmissionPolicyBindingList) APILifecycleDeprecated() (major, minor int) {
+ return 1, 37
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *MutatingAdmissionPolicyBindingList) APILifecycleRemoved() (major, minor int) {
+ return 1, 40
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *MutatingAdmissionPolicyList) APILifecycleIntroduced() (major, minor int) {
+ return 1, 34
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *MutatingAdmissionPolicyList) APILifecycleDeprecated() (major, minor int) {
+ return 1, 37
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *MutatingAdmissionPolicyList) APILifecycleRemoved() (major, minor int) {
+ return 1, 40
+}
+
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
func (in *MutatingWebhookConfiguration) APILifecycleIntroduced() (major, minor int) {
diff --git a/vendor/k8s.io/api/apps/v1/generated.proto b/vendor/k8s.io/api/apps/v1/generated.proto
index 38c8997e9..5885a6222 100644
--- a/vendor/k8s.io/api/apps/v1/generated.proto
+++ b/vendor/k8s.io/api/apps/v1/generated.proto
@@ -530,7 +530,7 @@ message RollingUpdateDaemonSet {
// pod is available (Ready for at least minReadySeconds) the old DaemonSet pod
// on that node is marked deleted. If the old pod becomes unavailable for any
// reason (Ready transitions to false, is evicted, or is drained) an updated
- // pod is immediatedly created on that node without considering surge limits.
+ // pod is immediately created on that node without considering surge limits.
// Allowing surge implies the possibility that the resources consumed by the
// daemonset on any given node can double if the readiness check fails, and
// so resource intensive daemonsets should take into account that they may
diff --git a/vendor/k8s.io/api/apps/v1/types.go b/vendor/k8s.io/api/apps/v1/types.go
index 1362d875d..4cf54cc99 100644
--- a/vendor/k8s.io/api/apps/v1/types.go
+++ b/vendor/k8s.io/api/apps/v1/types.go
@@ -635,7 +635,7 @@ type RollingUpdateDaemonSet struct {
// pod is available (Ready for at least minReadySeconds) the old DaemonSet pod
// on that node is marked deleted. If the old pod becomes unavailable for any
// reason (Ready transitions to false, is evicted, or is drained) an updated
- // pod is immediatedly created on that node without considering surge limits.
+ // pod is immediately created on that node without considering surge limits.
// Allowing surge implies the possibility that the resources consumed by the
// daemonset on any given node can double if the readiness check fails, and
// so resource intensive daemonsets should take into account that they may
diff --git a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
index f44ba7bc3..ac54033fd 100644
--- a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
@@ -265,7 +265,7 @@ func (ReplicaSetStatus) SwaggerDoc() map[string]string {
var map_RollingUpdateDaemonSet = map[string]string{
"": "Spec to control the desired behavior of daemon set rolling update.",
"maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.",
- "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.",
+ "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediately created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.",
}
func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.proto b/vendor/k8s.io/api/apps/v1beta1/generated.proto
index 0601efc3c..b61dc490d 100644
--- a/vendor/k8s.io/api/apps/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/apps/v1beta1/generated.proto
@@ -316,6 +316,9 @@ message Scale {
message ScaleSpec {
// replicas is the number of observed instances of the scaled object.
// +optional
+ // +k8s:optional
+ // +default=0
+ // +k8s:minimum=0
optional int32 replicas = 1;
}
diff --git a/vendor/k8s.io/api/apps/v1beta1/types.go b/vendor/k8s.io/api/apps/v1beta1/types.go
index 5530c990d..cd140be12 100644
--- a/vendor/k8s.io/api/apps/v1beta1/types.go
+++ b/vendor/k8s.io/api/apps/v1beta1/types.go
@@ -33,6 +33,9 @@ const (
type ScaleSpec struct {
// replicas is the number of observed instances of the scaled object.
// +optional
+ // +k8s:optional
+ // +default=0
+ // +k8s:minimum=0
Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
}
@@ -60,6 +63,7 @@ type ScaleStatus struct {
// +k8s:prerelease-lifecycle-gen:deprecated=1.8
// +k8s:prerelease-lifecycle-gen:removed=1.16
// +k8s:prerelease-lifecycle-gen:replacement=autoscaling,v1,Scale
+// +k8s:isSubresource=/scale
// Scale represents a scaling request for a resource.
type Scale struct {
diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.proto b/vendor/k8s.io/api/apps/v1beta2/generated.proto
index 68c463e25..37c6d5ae1 100644
--- a/vendor/k8s.io/api/apps/v1beta2/generated.proto
+++ b/vendor/k8s.io/api/apps/v1beta2/generated.proto
@@ -536,7 +536,7 @@ message RollingUpdateDaemonSet {
// pod is available (Ready for at least minReadySeconds) the old DaemonSet pod
// on that node is marked deleted. If the old pod becomes unavailable for any
// reason (Ready transitions to false, is evicted, or is drained) an updated
- // pod is immediatedly created on that node without considering surge limits.
+ // pod is immediately created on that node without considering surge limits.
// Allowing surge implies the possibility that the resources consumed by the
// daemonset on any given node can double if the readiness check fails, and
// so resource intensive daemonsets should take into account that they may
@@ -614,6 +614,9 @@ message Scale {
message ScaleSpec {
// desired number of instances for the scaled object.
// +optional
+ // +k8s:optional
+ // +default=0
+ // +k8s:minimum=0
optional int32 replicas = 1;
}
diff --git a/vendor/k8s.io/api/apps/v1beta2/types.go b/vendor/k8s.io/api/apps/v1beta2/types.go
index 491afc59f..e9dc85df0 100644
--- a/vendor/k8s.io/api/apps/v1beta2/types.go
+++ b/vendor/k8s.io/api/apps/v1beta2/types.go
@@ -35,6 +35,9 @@ const (
type ScaleSpec struct {
// desired number of instances for the scaled object.
// +optional
+ // +k8s:optional
+ // +default=0
+ // +k8s:minimum=0
Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
}
@@ -63,6 +66,7 @@ type ScaleStatus struct {
// +k8s:prerelease-lifecycle-gen:deprecated=1.9
// +k8s:prerelease-lifecycle-gen:removed=1.16
// +k8s:prerelease-lifecycle-gen:replacement=autoscaling,v1,Scale
+// +k8s:isSubresource=/scale
// Scale represents a scaling request for a resource.
type Scale struct {
@@ -681,7 +685,7 @@ type RollingUpdateDaemonSet struct {
// pod is available (Ready for at least minReadySeconds) the old DaemonSet pod
// on that node is marked deleted. If the old pod becomes unavailable for any
// reason (Ready transitions to false, is evicted, or is drained) an updated
- // pod is immediatedly created on that node without considering surge limits.
+ // pod is immediately created on that node without considering surge limits.
// Allowing surge implies the possibility that the resources consumed by the
// daemonset on any given node can double if the readiness check fails, and
// so resource intensive daemonsets should take into account that they may
diff --git a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
index 408943415..34d80af58 100644
--- a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
@@ -265,7 +265,7 @@ func (ReplicaSetStatus) SwaggerDoc() map[string]string {
var map_RollingUpdateDaemonSet = map[string]string{
"": "Spec to control the desired behavior of daemon set rolling update.",
"maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.",
- "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.",
+ "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediately created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.",
}
func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string {
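
The rounding rules spelled out in the maxUnavailable/maxSurge doc strings above can be checked with a small, self-contained sketch. This is plain Go with a made-up helper name (scaledValue), not the DaemonSet controller's actual code:

package main

import (
	"fmt"
	"math"
)

// scaledValue resolves a percentage against a total, rounding up, which is how
// the doc strings above describe converting maxUnavailable and maxSurge
// percentages into absolute pod counts.
func scaledValue(percent, total int) int {
	return int(math.Ceil(float64(percent) / 100.0 * float64(total)))
}

func main() {
	desired := 10 // status.desiredNumberScheduled

	maxUnavailable := scaledValue(30, desired) // ceil(0.30 * 10) = 3 pods may be stopped at once
	maxSurge := scaledValue(30, desired)       // ceil(0.30 * 10) = 3 nodes may run a surge pod
	if maxSurge < 1 {
		maxSurge = 1 // mirrors "rounding up to a minimum of 1"
	}
	fmt.Println(maxUnavailable, maxSurge) // 3 3
}

With desiredNumberScheduled=10 and both fields at 30%, at most 3 pods are taken down at a time and at most 3 nodes carry a surge pod.
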
diff --git a/vendor/k8s.io/api/authorization/v1/generated.proto b/vendor/k8s.io/api/authorization/v1/generated.proto
index 37b05b855..ff529c969 100644
--- a/vendor/k8s.io/api/authorization/v1/generated.proto
+++ b/vendor/k8s.io/api/authorization/v1/generated.proto
@@ -167,16 +167,10 @@ message ResourceAttributes {
optional string name = 7;
// fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it.
- //
- // This field is alpha-level. To use this field, you must enable the
- // `AuthorizeWithSelectors` feature gate (disabled by default).
// +optional
optional FieldSelectorAttributes fieldSelector = 8;
// labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it.
- //
- // This field is alpha-level. To use this field, you must enable the
- // `AuthorizeWithSelectors` feature gate (disabled by default).
// +optional
optional LabelSelectorAttributes labelSelector = 9;
}
diff --git a/vendor/k8s.io/api/authorization/v1/types.go b/vendor/k8s.io/api/authorization/v1/types.go
index 36f5fa410..251e776b0 100644
--- a/vendor/k8s.io/api/authorization/v1/types.go
+++ b/vendor/k8s.io/api/authorization/v1/types.go
@@ -119,15 +119,9 @@ type ResourceAttributes struct {
// +optional
Name string `json:"name,omitempty" protobuf:"bytes,7,opt,name=name"`
// fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it.
- //
- // This field is alpha-level. To use this field, you must enable the
- // `AuthorizeWithSelectors` feature gate (disabled by default).
// +optional
FieldSelector *FieldSelectorAttributes `json:"fieldSelector,omitempty" protobuf:"bytes,8,opt,name=fieldSelector"`
// labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it.
- //
- // This field is alpha-level. To use this field, you must enable the
- // `AuthorizeWithSelectors` feature gate (disabled by default).
// +optional
LabelSelector *LabelSelectorAttributes `json:"labelSelector,omitempty" protobuf:"bytes,9,opt,name=labelSelector"`
}
diff --git a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go
index dc6b8a89e..29d0aa846 100644
--- a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go
@@ -87,8 +87,8 @@ var map_ResourceAttributes = map[string]string{
"resource": "Resource is one of the existing resource types. \"*\" means all.",
"subresource": "Subresource is one of the existing resource types. \"\" means none.",
"name": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.",
- "fieldSelector": "fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it.\n\nThis field is alpha-level. To use this field, you must enable the `AuthorizeWithSelectors` feature gate (disabled by default).",
- "labelSelector": "labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it.\n\nThis field is alpha-level. To use this field, you must enable the `AuthorizeWithSelectors` feature gate (disabled by default).",
+ "fieldSelector": "fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it.",
+ "labelSelector": "labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it.",
}
func (ResourceAttributes) SwaggerDoc() map[string]string {
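
With the alpha-gate caveat dropped from the fieldSelector/labelSelector docs, a consumer-side sketch of how these attributes get populated may help. The FieldSelector and LabelSelector field names come from the hunk above; the RawSelector member inside FieldSelectorAttributes/LabelSelectorAttributes is assumed from the vendored types and should be verified there, and the whole snippet is illustrative rather than code from this repository:

package main

import (
	"fmt"

	authorizationv1 "k8s.io/api/authorization/v1"
)

func main() {
	// The selectors can only narrow the check: the question becomes
	// "may this subject list pods restricted to this node and label set?".
	attrs := authorizationv1.ResourceAttributes{
		Verb:      "list",
		Version:   "v1",
		Resource:  "pods",
		Namespace: "default",
		FieldSelector: &authorizationv1.FieldSelectorAttributes{
			RawSelector: "spec.nodeName=worker-1", // assumed field name: RawSelector
		},
		LabelSelector: &authorizationv1.LabelSelectorAttributes{
			RawSelector: "tier=web", // assumed field name: RawSelector
		},
	}
	fmt.Printf("%+v\n", attrs)
}
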
diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.proto b/vendor/k8s.io/api/autoscaling/v1/generated.proto
index 68c35b6b2..a17d7989d 100644
--- a/vendor/k8s.io/api/autoscaling/v1/generated.proto
+++ b/vendor/k8s.io/api/autoscaling/v1/generated.proto
@@ -472,6 +472,9 @@ message Scale {
message ScaleSpec {
// replicas is the desired number of instances for the scaled object.
// +optional
+ // +k8s:optional
+ // +default=0
+ // +k8s:minimum=0
optional int32 replicas = 1;
}
diff --git a/vendor/k8s.io/api/autoscaling/v1/types.go b/vendor/k8s.io/api/autoscaling/v1/types.go
index 85c609e5c..e1e8809fe 100644
--- a/vendor/k8s.io/api/autoscaling/v1/types.go
+++ b/vendor/k8s.io/api/autoscaling/v1/types.go
@@ -117,6 +117,7 @@ type HorizontalPodAutoscalerList struct {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.2
+// +k8s:isSubresource=/scale
// Scale represents a scaling request for a resource.
type Scale struct {
@@ -138,6 +139,9 @@ type Scale struct {
type ScaleSpec struct {
// replicas is the desired number of instances for the scaled object.
// +optional
+ // +k8s:optional
+ // +default=0
+ // +k8s:minimum=0
Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
}
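
The new +k8s:isSubresource=/scale and replicas markers describe the object that clients read and write through the scale subresource. A hedged client-go sketch of where ScaleSpec.Replicas ends up, assuming a typed clientset is available; the names "web" and "default" are placeholders:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	// GET .../deployments/web/scale returns an autoscaling/v1 Scale object.
	scale, err := cs.AppsV1().Deployments("default").GetScale(context.TODO(), "web", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}

	// ScaleSpec.Replicas: optional, defaults to 0, and must be >= 0 per the markers above.
	scale.Spec.Replicas = 3
	if _, err := cs.AppsV1().Deployments("default").UpdateScale(context.TODO(), "web", scale, metav1.UpdateOptions{}); err != nil {
		panic(err)
	}
	fmt.Println("scaled web to 3 replicas")
}
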
diff --git a/vendor/k8s.io/api/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto
index d3aeae0ad..c0ce8cef2 100644
--- a/vendor/k8s.io/api/batch/v1/generated.proto
+++ b/vendor/k8s.io/api/batch/v1/generated.proto
@@ -226,7 +226,8 @@ message JobSpec {
optional SuccessPolicy successPolicy = 16;
// Specifies the number of retries before marking this job failed.
- // Defaults to 6
+ // Defaults to 6, unless backoffLimitPerIndex (only Indexed Job) is specified.
+ // When backoffLimitPerIndex is specified, backoffLimit defaults to 2147483647.
// +optional
optional int32 backoffLimit = 7;
@@ -329,8 +330,6 @@ message JobSpec {
//
// When using podFailurePolicy, Failed is the the only allowed value.
// TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use.
- // This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle.
- // This is on by default.
// +optional
optional string podReplacementPolicy = 14;
@@ -570,7 +569,7 @@ message PodFailurePolicyRule {
message SuccessPolicy {
// rules represents the list of alternative rules for the declaring the Jobs
// as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met,
- // the "SucceededCriteriaMet" condition is added, and the lingering pods are removed.
+ // the "SuccessCriteriaMet" condition is added, and the lingering pods are removed.
// The terminal state for such a Job has the "Complete" condition.
// Additionally, these rules are evaluated in order; Once the Job meets one of the rules,
// other rules are ignored. At most 20 elements are allowed.
diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go
index 6c0007c21..9183c073d 100644
--- a/vendor/k8s.io/api/batch/v1/types.go
+++ b/vendor/k8s.io/api/batch/v1/types.go
@@ -257,7 +257,7 @@ type PodFailurePolicy struct {
type SuccessPolicy struct {
// rules represents the list of alternative rules for the declaring the Jobs
// as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met,
- // the "SucceededCriteriaMet" condition is added, and the lingering pods are removed.
+ // the "SuccessCriteriaMet" condition is added, and the lingering pods are removed.
// The terminal state for such a Job has the "Complete" condition.
// Additionally, these rules are evaluated in order; Once the Job meets one of the rules,
// other rules are ignored. At most 20 elements are allowed.
@@ -347,7 +347,8 @@ type JobSpec struct {
SuccessPolicy *SuccessPolicy `json:"successPolicy,omitempty" protobuf:"bytes,16,opt,name=successPolicy"`
// Specifies the number of retries before marking this job failed.
- // Defaults to 6
+ // Defaults to 6, unless backoffLimitPerIndex (only Indexed Job) is specified.
+ // When backoffLimitPerIndex is specified, backoffLimit defaults to 2147483647.
// +optional
BackoffLimit *int32 `json:"backoffLimit,omitempty" protobuf:"varint,7,opt,name=backoffLimit"`
@@ -455,8 +456,6 @@ type JobSpec struct {
//
// When using podFailurePolicy, Failed is the the only allowed value.
// TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use.
- // This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle.
- // This is on by default.
// +optional
PodReplacementPolicy *PodReplacementPolicy `json:"podReplacementPolicy,omitempty" protobuf:"bytes,14,opt,name=podReplacementPolicy,casttype=podReplacementPolicy"`
diff --git a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go
index ffd4e4f5f..451f4609f 100644
--- a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go
@@ -117,7 +117,7 @@ var map_JobSpec = map[string]string{
"activeDeadlineSeconds": "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.",
"podFailurePolicy": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.",
"successPolicy": "successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.",
- "backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6",
+ "backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6, unless backoffLimitPerIndex (only Indexed Job) is specified. When backoffLimitPerIndex is specified, backoffLimit defaults to 2147483647.",
"backoffLimitPerIndex": "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable.",
"maxFailedIndexes": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5.",
"selector": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
@@ -126,7 +126,7 @@ var map_JobSpec = map[string]string{
"ttlSecondsAfterFinished": "ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.",
"completionMode": "completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.",
"suspend": "suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.",
- "podReplacementPolicy": "podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\n when they are terminating (has a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n Failed or Succeeded) before creating a replacement Pod.\n\nWhen using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. This is on by default.",
+ "podReplacementPolicy": "podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\n when they are terminating (has a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n Failed or Succeeded) before creating a replacement Pod.\n\nWhen using podFailurePolicy, Failed is the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use.",
"managedBy": "ManagedBy field indicates the controller that manages a Job. The k8s Job controller reconciles jobs which don't have this field at all or the field value is the reserved string `kubernetes.io/job-controller`, but skips reconciling Jobs with a custom value for this field. The value must be a valid domain-prefixed path (e.g. acme.io/foo) - all characters before the first \"/\" must be a valid subdomain as defined by RFC 1123. All characters trailing the first \"/\" must be valid HTTP Path characters as defined by RFC 3986. The value cannot exceed 63 characters. This field is immutable.\n\nThis field is beta-level. The job controller accepts setting the field when the feature gate JobManagedBy is enabled (enabled by default).",
}
@@ -206,7 +206,7 @@ func (PodFailurePolicyRule) SwaggerDoc() map[string]string {
var map_SuccessPolicy = map[string]string{
"": "SuccessPolicy describes when a Job can be declared as succeeded based on the success of some indexes.",
- "rules": "rules represents the list of alternative rules for the declaring the Jobs as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met, the \"SucceededCriteriaMet\" condition is added, and the lingering pods are removed. The terminal state for such a Job has the \"Complete\" condition. Additionally, these rules are evaluated in order; Once the Job meets one of the rules, other rules are ignored. At most 20 elements are allowed.",
+ "rules": "rules represents the list of alternative rules for declaring the Jobs as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met, the \"SuccessCriteriaMet\" condition is added, and the lingering pods are removed. The terminal state for such a Job has the \"Complete\" condition. Additionally, these rules are evaluated in order; once the Job meets one of the rules, other rules are ignored. At most 20 elements are allowed.",
}
func (SuccessPolicy) SwaggerDoc() map[string]string {
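
The backoffLimit defaulting rule stated in the comments above (6 normally, 2147483647 once backoffLimitPerIndex is in play) is easy to misread. A minimal sketch of the rule, not the API server's actual defaulting code:

package main

import (
	"fmt"
	"math"
)

// effectiveBackoffLimit mirrors the documented defaulting: an explicit value
// wins; otherwise the default is 6, unless per-index backoff limits are used,
// in which case the Job-wide limit defaults to math.MaxInt32 (2147483647).
func effectiveBackoffLimit(backoffLimit, backoffLimitPerIndex *int32) int32 {
	if backoffLimit != nil {
		return *backoffLimit
	}
	if backoffLimitPerIndex != nil {
		return math.MaxInt32
	}
	return 6
}

func main() {
	perIndex := int32(2)
	fmt.Println(effectiveBackoffLimit(nil, nil))       // 6
	fmt.Println(effectiveBackoffLimit(nil, &perIndex)) // 2147483647
}
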
diff --git a/vendor/k8s.io/api/certificates/v1/generated.proto b/vendor/k8s.io/api/certificates/v1/generated.proto
index dac7c7f5f..24528fc8b 100644
--- a/vendor/k8s.io/api/certificates/v1/generated.proto
+++ b/vendor/k8s.io/api/certificates/v1/generated.proto
@@ -39,6 +39,8 @@ option go_package = "k8s.io/api/certificates/v1";
// This API can be used to request client certificates to authenticate to kube-apiserver
// (with the "kubernetes.io/kube-apiserver-client" signerName),
// or to obtain certificates from custom non-Kubernetes signers.
+// +k8s:supportsSubresource=/status
+// +k8s:supportsSubresource=/approval
message CertificateSigningRequest {
// +optional
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
@@ -203,6 +205,11 @@ message CertificateSigningRequestStatus {
// +listType=map
// +listMapKey=type
// +optional
+ // +k8s:listType=map
+ // +k8s:listMapKey=type
+ // +k8s:optional
+ // +k8s:item(type: "Approved")=+k8s:zeroOrOneOfMember
+ // +k8s:item(type: "Denied")=+k8s:zeroOrOneOfMember
repeated CertificateSigningRequestCondition conditions = 1;
// certificate is populated with an issued certificate by the signer after an Approved condition is present.
diff --git a/vendor/k8s.io/api/certificates/v1/types.go b/vendor/k8s.io/api/certificates/v1/types.go
index ba8009840..71203e80d 100644
--- a/vendor/k8s.io/api/certificates/v1/types.go
+++ b/vendor/k8s.io/api/certificates/v1/types.go
@@ -39,6 +39,8 @@ import (
// This API can be used to request client certificates to authenticate to kube-apiserver
// (with the "kubernetes.io/kube-apiserver-client" signerName),
// or to obtain certificates from custom non-Kubernetes signers.
+// +k8s:supportsSubresource=/status
+// +k8s:supportsSubresource=/approval
type CertificateSigningRequest struct {
metav1.TypeMeta `json:",inline"`
// +optional
@@ -178,6 +180,11 @@ type CertificateSigningRequestStatus struct {
// +listType=map
// +listMapKey=type
// +optional
+ // +k8s:listType=map
+ // +k8s:listMapKey=type
+ // +k8s:optional
+ // +k8s:item(type: "Approved")=+k8s:zeroOrOneOfMember
+ // +k8s:item(type: "Denied")=+k8s:zeroOrOneOfMember
Conditions []CertificateSigningRequestCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"`
// certificate is populated with an issued certificate by the signer after an Approved condition is present.
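
The new +k8s:item(type: "Approved") and +k8s:item(type: "Denied") zeroOrOneOfMember markers above declare that a CSR may carry an Approved condition or a Denied condition, but never both. A standalone check of that invariant, for illustration only (this is not the apiserver's validation code):

package main

import (
	"fmt"

	certificatesv1 "k8s.io/api/certificates/v1"
)

// hasApprovedAndDenied reports whether a condition list violates the
// zero-or-one-of rule by containing both terminal decisions at once.
func hasApprovedAndDenied(conds []certificatesv1.CertificateSigningRequestCondition) bool {
	var approved, denied bool
	for _, c := range conds {
		switch c.Type {
		case certificatesv1.CertificateApproved:
			approved = true
		case certificatesv1.CertificateDenied:
			denied = true
		}
	}
	return approved && denied
}

func main() {
	conds := []certificatesv1.CertificateSigningRequestCondition{
		{Type: certificatesv1.CertificateApproved, Status: "True", Reason: "AutoApproved"},
	}
	fmt.Println("violates zero-or-one-of:", hasApprovedAndDenied(conds)) // false
}
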
diff --git a/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go b/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go
index a62a40059..c260f0436 100644
--- a/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go
+++ b/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go
@@ -25,11 +25,14 @@ import (
io "io"
proto "github.com/gogo/protobuf/proto"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
+
+ k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
)
// Reference imports to suppress errors if they are not otherwise used.
@@ -127,10 +130,126 @@ func (m *ClusterTrustBundleSpec) XXX_DiscardUnknown() {
var xxx_messageInfo_ClusterTrustBundleSpec proto.InternalMessageInfo
+func (m *PodCertificateRequest) Reset() { *m = PodCertificateRequest{} }
+func (*PodCertificateRequest) ProtoMessage() {}
+func (*PodCertificateRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f73d5fe56c015bb8, []int{3}
+}
+func (m *PodCertificateRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *PodCertificateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *PodCertificateRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PodCertificateRequest.Merge(m, src)
+}
+func (m *PodCertificateRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *PodCertificateRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_PodCertificateRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodCertificateRequest proto.InternalMessageInfo
+
+func (m *PodCertificateRequestList) Reset() { *m = PodCertificateRequestList{} }
+func (*PodCertificateRequestList) ProtoMessage() {}
+func (*PodCertificateRequestList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f73d5fe56c015bb8, []int{4}
+}
+func (m *PodCertificateRequestList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *PodCertificateRequestList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *PodCertificateRequestList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PodCertificateRequestList.Merge(m, src)
+}
+func (m *PodCertificateRequestList) XXX_Size() int {
+ return m.Size()
+}
+func (m *PodCertificateRequestList) XXX_DiscardUnknown() {
+ xxx_messageInfo_PodCertificateRequestList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodCertificateRequestList proto.InternalMessageInfo
+
+func (m *PodCertificateRequestSpec) Reset() { *m = PodCertificateRequestSpec{} }
+func (*PodCertificateRequestSpec) ProtoMessage() {}
+func (*PodCertificateRequestSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f73d5fe56c015bb8, []int{5}
+}
+func (m *PodCertificateRequestSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *PodCertificateRequestSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *PodCertificateRequestSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PodCertificateRequestSpec.Merge(m, src)
+}
+func (m *PodCertificateRequestSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *PodCertificateRequestSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_PodCertificateRequestSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodCertificateRequestSpec proto.InternalMessageInfo
+
+func (m *PodCertificateRequestStatus) Reset() { *m = PodCertificateRequestStatus{} }
+func (*PodCertificateRequestStatus) ProtoMessage() {}
+func (*PodCertificateRequestStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f73d5fe56c015bb8, []int{6}
+}
+func (m *PodCertificateRequestStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *PodCertificateRequestStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *PodCertificateRequestStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PodCertificateRequestStatus.Merge(m, src)
+}
+func (m *PodCertificateRequestStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *PodCertificateRequestStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_PodCertificateRequestStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodCertificateRequestStatus proto.InternalMessageInfo
+
func init() {
proto.RegisterType((*ClusterTrustBundle)(nil), "k8s.io.api.certificates.v1alpha1.ClusterTrustBundle")
proto.RegisterType((*ClusterTrustBundleList)(nil), "k8s.io.api.certificates.v1alpha1.ClusterTrustBundleList")
proto.RegisterType((*ClusterTrustBundleSpec)(nil), "k8s.io.api.certificates.v1alpha1.ClusterTrustBundleSpec")
+ proto.RegisterType((*PodCertificateRequest)(nil), "k8s.io.api.certificates.v1alpha1.PodCertificateRequest")
+ proto.RegisterType((*PodCertificateRequestList)(nil), "k8s.io.api.certificates.v1alpha1.PodCertificateRequestList")
+ proto.RegisterType((*PodCertificateRequestSpec)(nil), "k8s.io.api.certificates.v1alpha1.PodCertificateRequestSpec")
+ proto.RegisterType((*PodCertificateRequestStatus)(nil), "k8s.io.api.certificates.v1alpha1.PodCertificateRequestStatus")
}
func init() {
@@ -138,35 +257,65 @@ func init() {
}
var fileDescriptor_f73d5fe56c015bb8 = []byte{
- // 437 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcf, 0x6a, 0xdb, 0x40,
- 0x10, 0xc6, 0xb5, 0x69, 0x02, 0xc9, 0xba, 0x85, 0xa2, 0x42, 0x31, 0x3e, 0x6c, 0x8c, 0x4f, 0xb9,
- 0x74, 0x37, 0x36, 0x69, 0xc9, 0x59, 0x85, 0x42, 0xa1, 0x7f, 0x40, 0xe9, 0xa5, 0xa1, 0x87, 0xae,
- 0xd7, 0x13, 0x79, 0x6b, 0x4b, 0x5a, 0x76, 0x57, 0x86, 0xde, 0x0a, 0x7d, 0x81, 0x3e, 0x96, 0x8f,
- 0x69, 0x4f, 0x39, 0x85, 0x5a, 0x7d, 0x91, 0xb2, 0x6b, 0xd9, 0x12, 0x55, 0x8b, 0x4b, 0x6e, 0x9a,
- 0xd1, 0xfc, 0xbe, 0x6f, 0xbe, 0x11, 0xc2, 0xa7, 0xb3, 0x73, 0x43, 0x65, 0xce, 0xb8, 0x92, 0x4c,
- 0x80, 0xb6, 0xf2, 0x4a, 0x0a, 0x6e, 0xc1, 0xb0, 0xc5, 0x90, 0xcf, 0xd5, 0x94, 0x0f, 0x59, 0x02,
- 0x19, 0x68, 0x6e, 0x61, 0x42, 0x95, 0xce, 0x6d, 0x1e, 0xf6, 0xd7, 0x04, 0xe5, 0x4a, 0xd2, 0x26,
- 0x41, 0x37, 0x44, 0xef, 0x49, 0x22, 0xed, 0xb4, 0x18, 0x53, 0x91, 0xa7, 0x2c, 0xc9, 0x93, 0x9c,
- 0x79, 0x70, 0x5c, 0x5c, 0xf9, 0xca, 0x17, 0xfe, 0x69, 0x2d, 0xd8, 0x3b, 0xab, 0x57, 0x48, 0xb9,
- 0x98, 0xca, 0x0c, 0xf4, 0x67, 0xa6, 0x66, 0x89, 0x6b, 0x18, 0x96, 0x82, 0xe5, 0x6c, 0xd1, 0x5a,
- 0xa3, 0xc7, 0xfe, 0x45, 0xe9, 0x22, 0xb3, 0x32, 0x85, 0x16, 0xf0, 0x6c, 0x17, 0x60, 0xc4, 0x14,
- 0x52, 0xfe, 0x27, 0x37, 0xf8, 0x81, 0x70, 0xf8, 0x7c, 0x5e, 0x18, 0x0b, 0xfa, 0x9d, 0x2e, 0x8c,
- 0x8d, 0x8a, 0x6c, 0x32, 0x87, 0xf0, 0x23, 0x3e, 0x74, 0xab, 0x4d, 0xb8, 0xe5, 0x5d, 0xd4, 0x47,
- 0x27, 0x9d, 0xd1, 0x29, 0xad, 0x2f, 0xb3, 0x75, 0xa0, 0x6a, 0x96, 0xb8, 0x86, 0xa1, 0x6e, 0x9a,
- 0x2e, 0x86, 0xf4, 0xed, 0xf8, 0x13, 0x08, 0xfb, 0x1a, 0x2c, 0x8f, 0xc2, 0xe5, 0xed, 0x71, 0x50,
- 0xde, 0x1e, 0xe3, 0xba, 0x17, 0x6f, 0x55, 0xc3, 0x4b, 0xbc, 0x6f, 0x14, 0x88, 0xee, 0x9e, 0x57,
- 0x3f, 0xa7, 0xbb, 0xee, 0x4e, 0xdb, 0x5b, 0x5e, 0x28, 0x10, 0xd1, 0xfd, 0xca, 0x65, 0xdf, 0x55,
- 0xb1, 0xd7, 0x1c, 0x7c, 0x47, 0xf8, 0x71, 0x7b, 0xfc, 0x95, 0x34, 0x36, 0xfc, 0xd0, 0x0a, 0x46,
- 0xff, 0x2f, 0x98, 0xa3, 0x7d, 0xac, 0x87, 0x95, 0xe1, 0xe1, 0xa6, 0xd3, 0x08, 0xf5, 0x1e, 0x1f,
- 0x48, 0x0b, 0xa9, 0xe9, 0xee, 0xf5, 0xef, 0x9d, 0x74, 0x46, 0x67, 0x77, 0x49, 0x15, 0x3d, 0xa8,
- 0x0c, 0x0e, 0x5e, 0x3a, 0xa9, 0x78, 0xad, 0x38, 0xf8, 0xfa, 0xd7, 0x4c, 0x2e, 0x74, 0x38, 0xc2,
- 0xd8, 0xc8, 0x24, 0x03, 0xfd, 0x86, 0xa7, 0xe0, 0x53, 0x1d, 0xd5, 0xc7, 0xbf, 0xd8, 0xbe, 0x89,
- 0x1b, 0x53, 0xe1, 0x53, 0xdc, 0xb1, 0xb5, 0x8c, 0xff, 0x0a, 0x47, 0xd1, 0xa3, 0x0a, 0xea, 0x34,
- 0x1c, 0xe2, 0xe6, 0x5c, 0xf4, 0x62, 0xb9, 0x22, 0xc1, 0xf5, 0x8a, 0x04, 0x37, 0x2b, 0x12, 0x7c,
- 0x29, 0x09, 0x5a, 0x96, 0x04, 0x5d, 0x97, 0x04, 0xdd, 0x94, 0x04, 0xfd, 0x2c, 0x09, 0xfa, 0xf6,
- 0x8b, 0x04, 0x97, 0xfd, 0x5d, 0xbf, 0xdd, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd4, 0x1c, 0xcb,
- 0xdd, 0x99, 0x03, 0x00, 0x00,
+ // 918 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x96, 0xcf, 0x6f, 0xe3, 0x44,
+ 0x14, 0xc7, 0xe3, 0xb6, 0x69, 0x9b, 0x49, 0x5b, 0xda, 0x61, 0x17, 0x99, 0x22, 0x39, 0x21, 0x07,
+ 0x54, 0x90, 0xb0, 0xb7, 0xa5, 0xb0, 0x2b, 0x10, 0x48, 0x75, 0x0a, 0x52, 0xe9, 0x6e, 0x36, 0x9a,
+ 0x74, 0xf9, 0xb1, 0x5a, 0x24, 0x1c, 0xe7, 0x25, 0x19, 0x1a, 0x7b, 0x8c, 0x67, 0x5c, 0xb5, 0x37,
+ 0x24, 0xfe, 0x01, 0xfe, 0x23, 0xae, 0x3d, 0x2e, 0x5c, 0xd8, 0x53, 0xa0, 0xe6, 0x6f, 0xe0, 0xb2,
+ 0x27, 0xe4, 0xb1, 0x9d, 0x5f, 0x4e, 0xb6, 0xd9, 0x1e, 0x7a, 0xcb, 0xbc, 0x79, 0xdf, 0xcf, 0xfb,
+ 0xbe, 0x99, 0x37, 0x56, 0xd0, 0xbd, 0xd3, 0x07, 0x5c, 0xa7, 0xcc, 0xb0, 0x3c, 0x6a, 0xd8, 0xe0,
+ 0x0b, 0xda, 0xa6, 0xb6, 0x25, 0x80, 0x1b, 0x67, 0xbb, 0x56, 0xcf, 0xeb, 0x5a, 0xbb, 0x46, 0x07,
+ 0x5c, 0xf0, 0x2d, 0x01, 0x2d, 0xdd, 0xf3, 0x99, 0x60, 0xb8, 0x1c, 0x2b, 0x74, 0xcb, 0xa3, 0xfa,
+ 0xa8, 0x42, 0x4f, 0x15, 0xdb, 0x1f, 0x76, 0xa8, 0xe8, 0x06, 0x4d, 0xdd, 0x66, 0x8e, 0xd1, 0x61,
+ 0x1d, 0x66, 0x48, 0x61, 0x33, 0x68, 0xcb, 0x95, 0x5c, 0xc8, 0x5f, 0x31, 0x70, 0x7b, 0x7f, 0x68,
+ 0xc1, 0xb1, 0xec, 0x2e, 0x75, 0xc1, 0xbf, 0x30, 0xbc, 0xd3, 0x4e, 0x14, 0xe0, 0x86, 0x03, 0xc2,
+ 0x32, 0xce, 0x32, 0x36, 0xb6, 0x8d, 0x59, 0x2a, 0x3f, 0x70, 0x05, 0x75, 0x20, 0x23, 0xf8, 0xe4,
+ 0x3a, 0x01, 0xb7, 0xbb, 0xe0, 0x58, 0x93, 0xba, 0xca, 0x9f, 0x0a, 0xc2, 0xd5, 0x5e, 0xc0, 0x05,
+ 0xf8, 0x27, 0x7e, 0xc0, 0x85, 0x19, 0xb8, 0xad, 0x1e, 0xe0, 0x1f, 0xd1, 0x6a, 0x64, 0xad, 0x65,
+ 0x09, 0x4b, 0x55, 0xca, 0xca, 0x4e, 0x71, 0xef, 0x9e, 0x3e, 0x3c, 0x99, 0x41, 0x05, 0xdd, 0x3b,
+ 0xed, 0x44, 0x01, 0xae, 0x47, 0xd9, 0xfa, 0xd9, 0xae, 0xfe, 0xb8, 0xf9, 0x13, 0xd8, 0xe2, 0x11,
+ 0x08, 0xcb, 0xc4, 0x97, 0xfd, 0x52, 0x2e, 0xec, 0x97, 0xd0, 0x30, 0x46, 0x06, 0x54, 0xfc, 0x14,
+ 0x2d, 0x71, 0x0f, 0x6c, 0x75, 0x41, 0xd2, 0x1f, 0xe8, 0xd7, 0x9d, 0xbb, 0x9e, 0x75, 0xd9, 0xf0,
+ 0xc0, 0x36, 0xd7, 0x92, 0x2a, 0x4b, 0xd1, 0x8a, 0x48, 0x66, 0xe5, 0x0f, 0x05, 0xbd, 0x95, 0x4d,
+ 0x7f, 0x48, 0xb9, 0xc0, 0xcf, 0x32, 0x8d, 0xe9, 0xf3, 0x35, 0x16, 0xa9, 0x65, 0x5b, 0x9b, 0x49,
+ 0xc1, 0xd5, 0x34, 0x32, 0xd2, 0xd4, 0xf7, 0x28, 0x4f, 0x05, 0x38, 0x5c, 0x5d, 0x28, 0x2f, 0xee,
+ 0x14, 0xf7, 0xf6, 0x6f, 0xd2, 0x95, 0xb9, 0x9e, 0x14, 0xc8, 0x1f, 0x45, 0x28, 0x12, 0x13, 0x2b,
+ 0xbf, 0x4e, 0xed, 0x29, 0x6a, 0x1a, 0xef, 0x21, 0xc4, 0x69, 0xc7, 0x05, 0xbf, 0x66, 0x39, 0x20,
+ 0xbb, 0x2a, 0x0c, 0x0f, 0xbf, 0x31, 0xd8, 0x21, 0x23, 0x59, 0xf8, 0x63, 0x54, 0x14, 0x43, 0x8c,
+ 0xbc, 0x85, 0x82, 0xf9, 0x66, 0x22, 0x2a, 0x8e, 0x54, 0x20, 0xa3, 0x79, 0x95, 0xdf, 0x17, 0xd0,
+ 0xdd, 0x3a, 0x6b, 0x55, 0x87, 0xbd, 0x10, 0xf8, 0x39, 0x00, 0x2e, 0x6e, 0x61, 0x62, 0x7e, 0x18,
+ 0x9b, 0x98, 0xcf, 0xae, 0x3f, 0xdb, 0xa9, 0x46, 0x67, 0x0d, 0x0d, 0x06, 0xb4, 0xcc, 0x85, 0x25,
+ 0x02, 0xae, 0x2e, 0xca, 0x02, 0x9f, 0xdf, 0xb4, 0x80, 0x84, 0x98, 0x1b, 0x49, 0x89, 0xe5, 0x78,
+ 0x4d, 0x12, 0x78, 0xe5, 0x2f, 0x05, 0xbd, 0x3d, 0x55, 0x77, 0x0b, 0xe3, 0xf9, 0x6c, 0x7c, 0x3c,
+ 0xef, 0xdf, 0xb0, 0xc3, 0x19, 0x13, 0xfa, 0x5f, 0x7e, 0x46, 0x67, 0x37, 0x1e, 0xd2, 0xf7, 0xd1,
+ 0x8a, 0xc7, 0x5a, 0x52, 0x10, 0x0f, 0xe8, 0x1b, 0x89, 0x60, 0xa5, 0x1e, 0x87, 0x49, 0xba, 0x8f,
+ 0x8f, 0xd1, 0xb2, 0xc7, 0x5a, 0x4f, 0x8e, 0x0e, 0xe5, 0xed, 0x15, 0xcc, 0x8f, 0xd2, 0xe3, 0xaf,
+ 0xcb, 0xe8, 0xcb, 0x7e, 0xe9, 0xdd, 0x59, 0x5f, 0x48, 0x71, 0xe1, 0x01, 0xd7, 0x9f, 0x1c, 0x1d,
+ 0x92, 0x04, 0x81, 0xbf, 0x46, 0x98, 0x83, 0x7f, 0x46, 0x6d, 0x38, 0xb0, 0x6d, 0x16, 0xb8, 0x42,
+ 0x5a, 0x58, 0x92, 0xe0, 0xed, 0x04, 0x8c, 0x1b, 0x99, 0x0c, 0x32, 0x45, 0x85, 0x7b, 0x68, 0x6b,
+ 0x3c, 0x1a, 0x79, 0xcc, 0x4b, 0xd4, 0x17, 0x09, 0x6a, 0xab, 0x31, 0x99, 0x30, 0x9f, 0xdd, 0x2c,
+ 0x18, 0x7f, 0x83, 0x56, 0x5d, 0xd6, 0x02, 0xe9, 0x77, 0x59, 0x16, 0xf9, 0x34, 0x9d, 0x87, 0x5a,
+ 0x12, 0x7f, 0xd9, 0x2f, 0xbd, 0xf7, 0x6a, 0x76, 0x9a, 0x49, 0x06, 0x2c, 0x5c, 0x43, 0x2b, 0xd1,
+ 0xef, 0xc8, 0xfb, 0x8a, 0xc4, 0xee, 0xa7, 0x37, 0x51, 0x8b, 0xc3, 0xf3, 0x39, 0x4e, 0x21, 0xf8,
+ 0x21, 0xba, 0xe3, 0x58, 0xe7, 0x5f, 0x9e, 0x7b, 0xd4, 0xb7, 0x04, 0x65, 0x6e, 0x03, 0x6c, 0xe6,
+ 0xb6, 0xb8, 0xba, 0x5a, 0x56, 0x76, 0xf2, 0xa6, 0x1a, 0xf6, 0x4b, 0x77, 0x1e, 0x4d, 0xd9, 0x27,
+ 0x53, 0x55, 0xf8, 0x3e, 0x5a, 0xf7, 0x4e, 0xe9, 0x79, 0x3d, 0x68, 0xf6, 0xa8, 0x7d, 0x0c, 0x17,
+ 0x6a, 0xa1, 0xac, 0xec, 0xac, 0x99, 0x5b, 0x61, 0xbf, 0xb4, 0x5e, 0x3f, 0x3e, 0xfa, 0x6e, 0xb0,
+ 0x41, 0xc6, 0xf3, 0x70, 0x15, 0x6d, 0x79, 0x3e, 0x63, 0xed, 0xc7, 0xed, 0x3a, 0xe3, 0x1c, 0x38,
+ 0xa7, 0xcc, 0x55, 0x91, 0x14, 0xdf, 0x8d, 0x2e, 0xa6, 0x3e, 0xb9, 0x49, 0xb2, 0xf9, 0x95, 0xbf,
+ 0x17, 0xd1, 0x3b, 0xaf, 0xf8, 0x12, 0x60, 0x1b, 0xa1, 0xc8, 0x26, 0x8d, 0x1c, 0x73, 0x55, 0x91,
+ 0x4f, 0xcf, 0x98, 0xef, 0x55, 0x57, 0x53, 0xdd, 0xf0, 0xa9, 0x0c, 0x42, 0x9c, 0x8c, 0x60, 0xf1,
+ 0x21, 0xda, 0x1c, 0x79, 0xc1, 0xd5, 0xae, 0x45, 0xdd, 0xe4, 0xcd, 0xa8, 0x89, 0x72, 0xb3, 0x3a,
+ 0xb1, 0x4f, 0x32, 0x0a, 0xfc, 0x2d, 0x2a, 0xb8, 0x4c, 0x98, 0xd0, 0x66, 0x7e, 0x3c, 0xef, 0xc5,
+ 0xbd, 0x0f, 0xe6, 0x73, 0x7a, 0x42, 0x1d, 0x30, 0xd7, 0xc3, 0x7e, 0xa9, 0x50, 0x4b, 0x01, 0x64,
+ 0xc8, 0xc2, 0x6d, 0xb4, 0xd1, 0x84, 0x0e, 0x75, 0x09, 0xb4, 0x7d, 0xe0, 0xdd, 0x03, 0x21, 0x9f,
+ 0xc0, 0xeb, 0xd1, 0x71, 0xd8, 0x2f, 0x6d, 0x98, 0x63, 0x14, 0x32, 0x41, 0xc5, 0x27, 0xd1, 0xfc,
+ 0x8b, 0x83, 0xb6, 0x00, 0x5f, 0xce, 0xff, 0xeb, 0x55, 0x58, 0x8b, 0xdf, 0x49, 0xac, 0x27, 0x03,
+ 0x92, 0xf9, 0xd5, 0xe5, 0x95, 0x96, 0x7b, 0x7e, 0xa5, 0xe5, 0x5e, 0x5c, 0x69, 0xb9, 0x5f, 0x42,
+ 0x4d, 0xb9, 0x0c, 0x35, 0xe5, 0x79, 0xa8, 0x29, 0x2f, 0x42, 0x4d, 0xf9, 0x27, 0xd4, 0x94, 0xdf,
+ 0xfe, 0xd5, 0x72, 0x4f, 0xcb, 0xd7, 0xfd, 0xd9, 0xfc, 0x3f, 0x00, 0x00, 0xff, 0xff, 0xcf, 0x6c,
+ 0x5a, 0xc4, 0x8f, 0x0a, 0x00, 0x00,
}
func (m *ClusterTrustBundle) Marshal() (dAtA []byte, err error) {
@@ -292,6 +441,261 @@ func (m *ClusterTrustBundleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)
return len(dAtA) - i, nil
}
+func (m *PodCertificateRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PodCertificateRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodCertificateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *PodCertificateRequestList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PodCertificateRequestList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodCertificateRequestList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *PodCertificateRequestSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PodCertificateRequestSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodCertificateRequestSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ProofOfPossession != nil {
+ i -= len(m.ProofOfPossession)
+ copy(dAtA[i:], m.ProofOfPossession)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProofOfPossession)))
+ i--
+ dAtA[i] = 0x52
+ }
+ if m.PKIXPublicKey != nil {
+ i -= len(m.PKIXPublicKey)
+ copy(dAtA[i:], m.PKIXPublicKey)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.PKIXPublicKey)))
+ i--
+ dAtA[i] = 0x4a
+ }
+ if m.MaxExpirationSeconds != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxExpirationSeconds))
+ i--
+ dAtA[i] = 0x40
+ }
+ i -= len(m.NodeUID)
+ copy(dAtA[i:], m.NodeUID)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeUID)))
+ i--
+ dAtA[i] = 0x3a
+ i -= len(m.NodeName)
+ copy(dAtA[i:], m.NodeName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName)))
+ i--
+ dAtA[i] = 0x32
+ i -= len(m.ServiceAccountUID)
+ copy(dAtA[i:], m.ServiceAccountUID)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountUID)))
+ i--
+ dAtA[i] = 0x2a
+ i -= len(m.ServiceAccountName)
+ copy(dAtA[i:], m.ServiceAccountName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName)))
+ i--
+ dAtA[i] = 0x22
+ i -= len(m.PodUID)
+ copy(dAtA[i:], m.PodUID)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodUID)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.PodName)
+ copy(dAtA[i:], m.PodName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodName)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.SignerName)
+ copy(dAtA[i:], m.SignerName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.SignerName)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *PodCertificateRequestStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PodCertificateRequestStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodCertificateRequestStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.NotAfter != nil {
+ {
+ size, err := m.NotAfter.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.BeginRefreshAt != nil {
+ {
+ size, err := m.BeginRefreshAt.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.NotBefore != nil {
+ {
+ size, err := m.NotBefore.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ i -= len(m.CertificateChain)
+ copy(dAtA[i:], m.CertificateChain)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.CertificateChain)))
+ i--
+ dAtA[i] = 0x12
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
offset -= sovGenerated(v)
base := offset
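
All of the generated marshal code added above follows the usual gogo/protobuf pattern: messages are written back to front into a pre-sized buffer, and each length-delimited field is preceded by a varint length and a one-byte tag of the form (fieldNumber<<3)|wireType, which is where the literal 0x0a, 0x12, 0x1a, ... bytes come from. A standalone illustration of that encoding; encoding/binary uses the same base-128 varint format:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Tag bytes: fields 1, 2, 3 with wire type 2 (length-delimited) encode to
	// 0xa, 0x12, 0x1a, matching the constants written by MarshalToSizedBuffer.
	for _, field := range []uint64{1, 2, 3} {
		fmt.Printf("field %d, wire type 2 -> tag byte %#x\n", field, field<<3|2)
	}

	// Length prefixes are plain varints: 7 payload bits per byte, with the high
	// bit set on every byte except the last.
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, 300)
	fmt.Printf("varint(300) = % x\n", buf[:n]) // ac 02
}
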
@@ -346,25 +750,120 @@ func (m *ClusterTrustBundleSpec) Size() (n int) {
return n
}
-func sovGenerated(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozGenerated(x uint64) (n int) {
- return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (this *ClusterTrustBundle) String() string {
- if this == nil {
- return "nil"
+func (m *PodCertificateRequest) Size() (n int) {
+ if m == nil {
+ return 0
}
- s := strings.Join([]string{`&ClusterTrustBundle{`,
- `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
- `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterTrustBundleSpec", "ClusterTrustBundleSpec", 1), `&`, ``, 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *ClusterTrustBundleList) String() string {
- if this == nil {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *PodCertificateRequestList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *PodCertificateRequestSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.SignerName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.PodName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.PodUID)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.ServiceAccountName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.ServiceAccountUID)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.NodeName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.NodeUID)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.MaxExpirationSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.MaxExpirationSeconds))
+ }
+ if m.PKIXPublicKey != nil {
+ l = len(m.PKIXPublicKey)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ProofOfPossession != nil {
+ l = len(m.ProofOfPossession)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *PodCertificateRequestStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.CertificateChain)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.NotBefore != nil {
+ l = m.NotBefore.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.BeginRefreshAt != nil {
+ l = m.BeginRefreshAt.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.NotAfter != nil {
+ l = m.NotAfter.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *ClusterTrustBundle) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ClusterTrustBundle{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterTrustBundleSpec", "ClusterTrustBundleSpec", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ClusterTrustBundleList) String() string {
+ if this == nil {
return "nil"
}
repeatedStringForItems := "[]ClusterTrustBundle{"
@@ -390,6 +889,72 @@ func (this *ClusterTrustBundleSpec) String() string {
}, "")
return s
}
+func (this *PodCertificateRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&PodCertificateRequest{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodCertificateRequestSpec", "PodCertificateRequestSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodCertificateRequestStatus", "PodCertificateRequestStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *PodCertificateRequestList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]PodCertificateRequest{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodCertificateRequest", "PodCertificateRequest", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&PodCertificateRequestList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *PodCertificateRequestSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&PodCertificateRequestSpec{`,
+ `SignerName:` + fmt.Sprintf("%v", this.SignerName) + `,`,
+ `PodName:` + fmt.Sprintf("%v", this.PodName) + `,`,
+ `PodUID:` + fmt.Sprintf("%v", this.PodUID) + `,`,
+ `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`,
+ `ServiceAccountUID:` + fmt.Sprintf("%v", this.ServiceAccountUID) + `,`,
+ `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`,
+ `NodeUID:` + fmt.Sprintf("%v", this.NodeUID) + `,`,
+ `MaxExpirationSeconds:` + valueToStringGenerated(this.MaxExpirationSeconds) + `,`,
+ `PKIXPublicKey:` + valueToStringGenerated(this.PKIXPublicKey) + `,`,
+ `ProofOfPossession:` + valueToStringGenerated(this.ProofOfPossession) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *PodCertificateRequestStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForConditions := "[]Condition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForConditions += "}"
+ s := strings.Join([]string{`&PodCertificateRequestStatus{`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `CertificateChain:` + fmt.Sprintf("%v", this.CertificateChain) + `,`,
+ `NotBefore:` + strings.Replace(fmt.Sprintf("%v", this.NotBefore), "Time", "v1.Time", 1) + `,`,
+ `BeginRefreshAt:` + strings.Replace(fmt.Sprintf("%v", this.BeginRefreshAt), "Time", "v1.Time", 1) + `,`,
+ `NotAfter:` + strings.Replace(fmt.Sprintf("%v", this.NotAfter), "Time", "v1.Time", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func valueToStringGenerated(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
@@ -745,6 +1310,858 @@ func (m *ClusterTrustBundleSpec) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *PodCertificateRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PodCertificateRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PodCertificateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PodCertificateRequestList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PodCertificateRequestList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PodCertificateRequestList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, PodCertificateRequest{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PodCertificateRequestSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PodCertificateRequestSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PodCertificateRequestSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SignerName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PodName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PodName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PodUID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PodUID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ServiceAccountName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountUID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ServiceAccountUID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NodeName = k8s_io_apimachinery_pkg_types.NodeName(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeUID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NodeUID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxExpirationSeconds", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.MaxExpirationSeconds = &v
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PKIXPublicKey", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PKIXPublicKey = append(m.PKIXPublicKey[:0], dAtA[iNdEx:postIndex]...)
+ if m.PKIXPublicKey == nil {
+ m.PKIXPublicKey = []byte{}
+ }
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofOfPossession", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ProofOfPossession = append(m.ProofOfPossession[:0], dAtA[iNdEx:postIndex]...)
+ if m.ProofOfPossession == nil {
+ m.ProofOfPossession = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PodCertificateRequestStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PodCertificateRequestStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PodCertificateRequestStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, v1.Condition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CertificateChain", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CertificateChain = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NotBefore", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NotBefore == nil {
+ m.NotBefore = &v1.Time{}
+ }
+ if err := m.NotBefore.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field BeginRefreshAt", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.BeginRefreshAt == nil {
+ m.BeginRefreshAt = &v1.Time{}
+ }
+ if err := m.BeginRefreshAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NotAfter", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NotAfter == nil {
+ m.NotAfter = &v1.Time{}
+ }
+ if err := m.NotAfter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func skipGenerated(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
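
Editor's note (not part of the vendored sources): the generated Unmarshal methods above repeat the same hand-rolled base-128 varint loop for every field tag and length prefix. The standalone sketch below isolates that loop as a reader's aid; the package and function names are illustrative only.

package varint

import (
	"fmt"
	"io"
)

// decodeUvarint reads one protobuf varint from data: each byte contributes its
// low 7 bits, and a set high bit (>= 0x80) means another byte follows.
func decodeUvarint(data []byte) (value uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, fmt.Errorf("varint overflows uint64")
		}
		if n >= len(data) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := data[n]
		n++
		value |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return value, n, nil
		}
	}
}
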
diff --git a/vendor/k8s.io/api/certificates/v1alpha1/generated.proto b/vendor/k8s.io/api/certificates/v1alpha1/generated.proto
index 7155f778c..194bdbc14 100644
--- a/vendor/k8s.io/api/certificates/v1alpha1/generated.proto
+++ b/vendor/k8s.io/api/certificates/v1alpha1/generated.proto
@@ -101,3 +101,208 @@ message ClusterTrustBundleSpec {
optional string trustBundle = 2;
}
+// PodCertificateRequest encodes a pod requesting a certificate from a given
+// signer.
+//
+// Kubelets use this API to implement podCertificate projected volumes
+message PodCertificateRequest {
+ // metadata contains the object metadata.
+ //
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // spec contains the details about the certificate being requested.
+ optional PodCertificateRequestSpec spec = 2;
+
+ // status contains the issued certificate, and a standard set of conditions.
+ // +optional
+ optional PodCertificateRequestStatus status = 3;
+}
+
+// PodCertificateRequestList is a collection of PodCertificateRequest objects
+message PodCertificateRequestList {
+ // metadata contains the list metadata.
+ //
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // items is a collection of PodCertificateRequest objects
+ repeated PodCertificateRequest items = 2;
+}
+
+// PodCertificateRequestSpec describes the certificate request. All fields are
+// immutable after creation.
+message PodCertificateRequestSpec {
+ // signerName indicates the requested signer.
+ //
+ // All signer names beginning with `kubernetes.io` are reserved for use by
+ // the Kubernetes project. There is currently one well-known signer
+ // documented by the Kubernetes project,
+ // `kubernetes.io/kube-apiserver-client-pod`, which will issue client
+ // certificates understood by kube-apiserver. It is currently
+ // unimplemented.
+ //
+ // +required
+ optional string signerName = 1;
+
+ // podName is the name of the pod into which the certificate will be mounted.
+ //
+ // +required
+ optional string podName = 2;
+
+ // podUID is the UID of the pod into which the certificate will be mounted.
+ //
+ // +required
+ optional string podUID = 3;
+
+ // serviceAccountName is the name of the service account the pod is running as.
+ //
+ // +required
+ optional string serviceAccountName = 4;
+
+ // serviceAccountUID is the UID of the service account the pod is running as.
+ //
+ // +required
+ optional string serviceAccountUID = 5;
+
+ // nodeName is the name of the node the pod is assigned to.
+ //
+ // +required
+ optional string nodeName = 6;
+
+ // nodeUID is the UID of the node the pod is assigned to.
+ //
+ // +required
+ optional string nodeUID = 7;
+
+ // maxExpirationSeconds is the maximum lifetime permitted for the
+ // certificate.
+ //
+  // If omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver
+ // will reject values shorter than 3600 (1 hour). The maximum allowable
+ // value is 7862400 (91 days).
+ //
+ // The signer implementation is then free to issue a certificate with any
+ // lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
+ // seconds (1 hour). This constraint is enforced by kube-apiserver.
+ // `kubernetes.io` signers will never issue certificates with a lifetime
+ // longer than 24 hours.
+ //
+ // +optional
+ // +default=86400
+ optional int32 maxExpirationSeconds = 8;
+
+ // pkixPublicKey is the PKIX-serialized public key the signer will issue the
+ // certificate to.
+ //
+ // The key must be one of RSA3072, RSA4096, ECDSAP256, ECDSAP384, ECDSAP521,
+ // or ED25519. Note that this list may be expanded in the future.
+ //
+ // Signer implementations do not need to support all key types supported by
+ // kube-apiserver and kubelet. If a signer does not support the key type
+ // used for a given PodCertificateRequest, it must deny the request by
+ // setting a status.conditions entry with a type of "Denied" and a reason of
+ // "UnsupportedKeyType". It may also suggest a key type that it does support
+ // in the message field.
+ //
+ // +required
+ optional bytes pkixPublicKey = 9;
+
+ // proofOfPossession proves that the requesting kubelet holds the private
+ // key corresponding to pkixPublicKey.
+ //
+  // It is constructed by signing the ASCII bytes of the pod's UID using
+ // `pkixPublicKey`.
+ //
+ // kube-apiserver validates the proof of possession during creation of the
+ // PodCertificateRequest.
+ //
+ // If the key is an RSA key, then the signature is over the ASCII bytes of
+ // the pod UID, using RSASSA-PSS from RFC 8017 (as implemented by the golang
+ // function crypto/rsa.SignPSS with nil options).
+ //
+ // If the key is an ECDSA key, then the signature is as described by [SEC 1,
+ // Version 2.0](https://www.secg.org/sec1-v2.pdf) (as implemented by the
+ // golang library function crypto/ecdsa.SignASN1)
+ //
+  // If the key is an ED25519 key, then the signature is as described by the
+ // [ED25519 Specification](https://ed25519.cr.yp.to/) (as implemented by
+ // the golang library crypto/ed25519.Sign).
+ //
+ // +required
+ optional bytes proofOfPossession = 10;
+}
+
+// PodCertificateRequestStatus describes the status of the request, and holds
+// the certificate data if the request is issued.
+message PodCertificateRequestStatus {
+ // conditions applied to the request.
+ //
+ // The types "Issued", "Denied", and "Failed" have special handling. At
+ // most one of these conditions may be present, and they must have status
+ // "True".
+ //
+ // If the request is denied with `Reason=UnsupportedKeyType`, the signer may
+ // suggest a key type that will work in the message field.
+ //
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=type
+ // +optional
+ repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1;
+
+ // certificateChain is populated with an issued certificate by the signer.
+ // This field is set via the /status subresource. Once populated, this field
+ // is immutable.
+ //
+ // If the certificate signing request is denied, a condition of type
+ // "Denied" is added and this field remains empty. If the signer cannot
+ // issue the certificate, a condition of type "Failed" is added and this
+ // field remains empty.
+ //
+ // Validation requirements:
+ // 1. certificateChain must consist of one or more PEM-formatted certificates.
+ // 2. Each entry must be a valid PEM-wrapped, DER-encoded ASN.1 Certificate as
+ // described in section 4 of RFC5280.
+ //
+ // If more than one block is present, and the definition of the requested
+ // spec.signerName does not indicate otherwise, the first block is the
+ // issued certificate, and subsequent blocks should be treated as
+ // intermediate certificates and presented in TLS handshakes. When
+ // projecting the chain into a pod volume, kubelet will drop any data
+ // in-between the PEM blocks, as well as any PEM block headers.
+ //
+ // +optional
+ optional string certificateChain = 2;
+
+ // notBefore is the time at which the certificate becomes valid. The value
+ // must be the same as the notBefore value in the leaf certificate in
+ // certificateChain. This field is set via the /status subresource. Once
+ // populated, it is immutable. The signer must set this field at the same
+ // time it sets certificateChain.
+ //
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time notBefore = 4;
+
+ // beginRefreshAt is the time at which the kubelet should begin trying to
+ // refresh the certificate. This field is set via the /status subresource,
+ // and must be set at the same time as certificateChain. Once populated,
+ // this field is immutable.
+ //
+ // This field is only a hint. Kubelet may start refreshing before or after
+ // this time if necessary.
+ //
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time beginRefreshAt = 5;
+
+ // notAfter is the time at which the certificate expires. The value must be
+ // the same as the notAfter value in the leaf certificate in
+ // certificateChain. This field is set via the /status subresource. Once
+ // populated, it is immutable. The signer must set this field at the same
+ // time it sets certificateChain.
+ //
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time notAfter = 6;
+}
+
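
Editor's note (not part of the vendored sources): the proofOfPossession documented above is a signature over the ASCII bytes of the pod UID, made with the private key that matches pkixPublicKey. Below is a minimal sketch of the Ed25519 case, using only the standard-library calls the field comment names (crypto/ed25519.Sign, with the public key serialized via crypto/x509.MarshalPKIXPublicKey); the pod UID shown is a placeholder value.

package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"crypto/x509"
	"fmt"
)

func main() {
	// Ed25519 is one of the permitted key types for pkixPublicKey.
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	// spec.pkixPublicKey: the PKIX (DER) serialization of the public key.
	pkixPublicKey, err := x509.MarshalPKIXPublicKey(pub)
	if err != nil {
		panic(err)
	}

	// spec.proofOfPossession: the signature over the ASCII bytes of the pod UID.
	podUID := "11111111-2222-3333-4444-555555555555" // placeholder UID
	proofOfPossession := ed25519.Sign(priv, []byte(podUID))

	fmt.Printf("pkixPublicKey=%d bytes proofOfPossession=%d bytes\n",
		len(pkixPublicKey), len(proofOfPossession))
}
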
diff --git a/vendor/k8s.io/api/certificates/v1alpha1/register.go b/vendor/k8s.io/api/certificates/v1alpha1/register.go
index 7288ed9a3..ae541e15c 100644
--- a/vendor/k8s.io/api/certificates/v1alpha1/register.go
+++ b/vendor/k8s.io/api/certificates/v1alpha1/register.go
@@ -53,6 +53,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&ClusterTrustBundle{},
&ClusterTrustBundleList{},
+ &PodCertificateRequest{},
+ &PodCertificateRequestList{},
)
// Add the watch version that applies
diff --git a/vendor/k8s.io/api/certificates/v1alpha1/types.go b/vendor/k8s.io/api/certificates/v1alpha1/types.go
index beef02599..98e4c68d4 100644
--- a/vendor/k8s.io/api/certificates/v1alpha1/types.go
+++ b/vendor/k8s.io/api/certificates/v1alpha1/types.go
@@ -18,6 +18,7 @@ package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
)
// +genclient
@@ -106,3 +107,233 @@ type ClusterTrustBundleList struct {
// items is a collection of ClusterTrustBundle objects
Items []ClusterTrustBundle `json:"items" protobuf:"bytes,2,rep,name=items"`
}
+
+// +genclient
+// +k8s:prerelease-lifecycle-gen:introduced=1.32
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodCertificateRequest encodes a pod requesting a certificate from a given
+// signer.
+//
+// Kubelets use this API to implement podCertificate projected volumes
+type PodCertificateRequest struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata contains the object metadata.
+ //
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // spec contains the details about the certificate being requested.
+ Spec PodCertificateRequestSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+
+ // status contains the issued certificate, and a standard set of conditions.
+ // +optional
+ Status PodCertificateRequestStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// PodCertificateRequestSpec describes the certificate request. All fields are
+// immutable after creation.
+type PodCertificateRequestSpec struct {
+ // signerName indicates the requested signer.
+ //
+ // All signer names beginning with `kubernetes.io` are reserved for use by
+ // the Kubernetes project. There is currently one well-known signer
+ // documented by the Kubernetes project,
+ // `kubernetes.io/kube-apiserver-client-pod`, which will issue client
+ // certificates understood by kube-apiserver. It is currently
+ // unimplemented.
+ //
+ // +required
+ SignerName string `json:"signerName" protobuf:"bytes,1,opt,name=signerName"`
+
+ // podName is the name of the pod into which the certificate will be mounted.
+ //
+ // +required
+ PodName string `json:"podName" protobuf:"bytes,2,opt,name=podName"`
+ // podUID is the UID of the pod into which the certificate will be mounted.
+ //
+ // +required
+ PodUID types.UID `json:"podUID" protobuf:"bytes,3,opt,name=podUID"`
+
+ // serviceAccountName is the name of the service account the pod is running as.
+ //
+ // +required
+ ServiceAccountName string `json:"serviceAccountName" protobuf:"bytes,4,opt,name=serviceAccountName"`
+ // serviceAccountUID is the UID of the service account the pod is running as.
+ //
+ // +required
+ ServiceAccountUID types.UID `json:"serviceAccountUID" protobuf:"bytes,5,opt,name=serviceAccountUID"`
+
+ // nodeName is the name of the node the pod is assigned to.
+ //
+ // +required
+ NodeName types.NodeName `json:"nodeName" protobuf:"bytes,6,opt,name=nodeName"`
+ // nodeUID is the UID of the node the pod is assigned to.
+ //
+ // +required
+ NodeUID types.UID `json:"nodeUID" protobuf:"bytes,7,opt,name=nodeUID"`
+
+ // maxExpirationSeconds is the maximum lifetime permitted for the
+ // certificate.
+ //
+	// If omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver
+ // will reject values shorter than 3600 (1 hour). The maximum allowable
+ // value is 7862400 (91 days).
+ //
+ // The signer implementation is then free to issue a certificate with any
+ // lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
+ // seconds (1 hour). This constraint is enforced by kube-apiserver.
+ // `kubernetes.io` signers will never issue certificates with a lifetime
+ // longer than 24 hours.
+ //
+ // +optional
+ // +default=86400
+ MaxExpirationSeconds *int32 `json:"maxExpirationSeconds,omitempty" protobuf:"varint,8,opt,name=maxExpirationSeconds"`
+
+ // pkixPublicKey is the PKIX-serialized public key the signer will issue the
+ // certificate to.
+ //
+ // The key must be one of RSA3072, RSA4096, ECDSAP256, ECDSAP384, ECDSAP521,
+ // or ED25519. Note that this list may be expanded in the future.
+ //
+ // Signer implementations do not need to support all key types supported by
+ // kube-apiserver and kubelet. If a signer does not support the key type
+ // used for a given PodCertificateRequest, it must deny the request by
+ // setting a status.conditions entry with a type of "Denied" and a reason of
+ // "UnsupportedKeyType". It may also suggest a key type that it does support
+ // in the message field.
+ //
+ // +required
+ PKIXPublicKey []byte `json:"pkixPublicKey" protobuf:"bytes,9,opt,name=pkixPublicKey"`
+
+ // proofOfPossession proves that the requesting kubelet holds the private
+ // key corresponding to pkixPublicKey.
+ //
+	// It is constructed by signing the ASCII bytes of the pod's UID using
+ // `pkixPublicKey`.
+ //
+ // kube-apiserver validates the proof of possession during creation of the
+ // PodCertificateRequest.
+ //
+ // If the key is an RSA key, then the signature is over the ASCII bytes of
+ // the pod UID, using RSASSA-PSS from RFC 8017 (as implemented by the golang
+ // function crypto/rsa.SignPSS with nil options).
+ //
+ // If the key is an ECDSA key, then the signature is as described by [SEC 1,
+ // Version 2.0](https://www.secg.org/sec1-v2.pdf) (as implemented by the
+ // golang library function crypto/ecdsa.SignASN1)
+ //
+	// If the key is an ED25519 key, then the signature is as described by the
+ // [ED25519 Specification](https://ed25519.cr.yp.to/) (as implemented by
+ // the golang library crypto/ed25519.Sign).
+ //
+ // +required
+ ProofOfPossession []byte `json:"proofOfPossession" protobuf:"bytes,10,opt,name=proofOfPossession"`
+}
+
+// PodCertificateRequestStatus describes the status of the request, and holds
+// the certificate data if the request is issued.
+type PodCertificateRequestStatus struct {
+ // conditions applied to the request.
+ //
+ // The types "Issued", "Denied", and "Failed" have special handling. At
+ // most one of these conditions may be present, and they must have status
+ // "True".
+ //
+ // If the request is denied with `Reason=UnsupportedKeyType`, the signer may
+ // suggest a key type that will work in the message field.
+ //
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=type
+ // +optional
+ Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+
+ // certificateChain is populated with an issued certificate by the signer.
+ // This field is set via the /status subresource. Once populated, this field
+ // is immutable.
+ //
+ // If the certificate signing request is denied, a condition of type
+ // "Denied" is added and this field remains empty. If the signer cannot
+ // issue the certificate, a condition of type "Failed" is added and this
+ // field remains empty.
+ //
+ // Validation requirements:
+ // 1. certificateChain must consist of one or more PEM-formatted certificates.
+ // 2. Each entry must be a valid PEM-wrapped, DER-encoded ASN.1 Certificate as
+ // described in section 4 of RFC5280.
+ //
+ // If more than one block is present, and the definition of the requested
+ // spec.signerName does not indicate otherwise, the first block is the
+ // issued certificate, and subsequent blocks should be treated as
+ // intermediate certificates and presented in TLS handshakes. When
+ // projecting the chain into a pod volume, kubelet will drop any data
+ // in-between the PEM blocks, as well as any PEM block headers.
+ //
+ // +optional
+ CertificateChain string `json:"certificateChain,omitempty" protobuf:"bytes,2,opt,name=certificateChain"`
+
+ // notBefore is the time at which the certificate becomes valid. The value
+ // must be the same as the notBefore value in the leaf certificate in
+ // certificateChain. This field is set via the /status subresource. Once
+ // populated, it is immutable. The signer must set this field at the same
+ // time it sets certificateChain.
+ //
+ // +optional
+ NotBefore *metav1.Time `json:"notBefore,omitempty" protobuf:"bytes,4,opt,name=notBefore"`
+
+ // beginRefreshAt is the time at which the kubelet should begin trying to
+ // refresh the certificate. This field is set via the /status subresource,
+ // and must be set at the same time as certificateChain. Once populated,
+ // this field is immutable.
+ //
+ // This field is only a hint. Kubelet may start refreshing before or after
+ // this time if necessary.
+ //
+ // +optional
+ BeginRefreshAt *metav1.Time `json:"beginRefreshAt,omitempty" protobuf:"bytes,5,opt,name=beginRefreshAt"`
+
+ // notAfter is the time at which the certificate expires. The value must be
+ // the same as the notAfter value in the leaf certificate in
+ // certificateChain. This field is set via the /status subresource. Once
+ // populated, it is immutable. The signer must set this field at the same
+ // time it sets certificateChain.
+ //
+ // +optional
+ NotAfter *metav1.Time `json:"notAfter,omitempty" protobuf:"bytes,6,opt,name=notAfter"`
+}
+
+// Well-known condition types for PodCertificateRequests
+const (
+ // Denied indicates the request was denied by the signer.
+ PodCertificateRequestConditionTypeDenied string = "Denied"
+ // Failed indicates the signer failed to issue the certificate.
+ PodCertificateRequestConditionTypeFailed string = "Failed"
+ // Issued indicates the certificate has been issued.
+ PodCertificateRequestConditionTypeIssued string = "Issued"
+)
+
+// Well-known condition reasons for PodCertificateRequests
+const (
+ // UnsupportedKeyType should be set on "Denied" conditions when the signer
+ // doesn't support the key type of publicKey.
+ PodCertificateRequestConditionUnsupportedKeyType string = "UnsupportedKeyType"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.32
+
+// PodCertificateRequestList is a collection of PodCertificateRequest objects
+type PodCertificateRequestList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata contains the list metadata.
+ //
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // items is a collection of PodCertificateRequest objects
+ Items []PodCertificateRequest `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
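
Editor's note (not part of the vendored sources): the status documented above treats "Issued", "Denied", and "Failed" as mutually exclusive terminal conditions, each with status "True" when present. The hedged sketch below shows how a consumer of these types might check them, using the condition helpers from k8s.io/apimachinery/pkg/api/meta; the helper name is illustrative.

package main

import (
	"fmt"

	certsv1alpha1 "k8s.io/api/certificates/v1alpha1"
	"k8s.io/apimachinery/pkg/api/meta"
)

// terminalState reports which terminal condition, if any, is "True" on the request.
func terminalState(req *certsv1alpha1.PodCertificateRequest) string {
	for _, t := range []string{
		certsv1alpha1.PodCertificateRequestConditionTypeIssued,
		certsv1alpha1.PodCertificateRequestConditionTypeDenied,
		certsv1alpha1.PodCertificateRequestConditionTypeFailed,
	} {
		if meta.IsStatusConditionTrue(req.Status.Conditions, t) {
			return t
		}
	}
	return "" // no terminal condition yet; the request is still pending
}

func main() {
	req := &certsv1alpha1.PodCertificateRequest{}
	fmt.Printf("terminal state: %q\n", terminalState(req))
}
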
diff --git a/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go
index bff649e3c..d29f2d850 100644
--- a/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go
@@ -57,4 +57,56 @@ func (ClusterTrustBundleSpec) SwaggerDoc() map[string]string {
return map_ClusterTrustBundleSpec
}
+var map_PodCertificateRequest = map[string]string{
+ "": "PodCertificateRequest encodes a pod requesting a certificate from a given signer.\n\nKubelets use this API to implement podCertificate projected volumes",
+ "metadata": "metadata contains the object metadata.",
+ "spec": "spec contains the details about the certificate being requested.",
+ "status": "status contains the issued certificate, and a standard set of conditions.",
+}
+
+func (PodCertificateRequest) SwaggerDoc() map[string]string {
+ return map_PodCertificateRequest
+}
+
+var map_PodCertificateRequestList = map[string]string{
+ "": "PodCertificateRequestList is a collection of PodCertificateRequest objects",
+ "metadata": "metadata contains the list metadata.",
+ "items": "items is a collection of PodCertificateRequest objects",
+}
+
+func (PodCertificateRequestList) SwaggerDoc() map[string]string {
+ return map_PodCertificateRequestList
+}
+
+var map_PodCertificateRequestSpec = map[string]string{
+ "": "PodCertificateRequestSpec describes the certificate request. All fields are immutable after creation.",
+ "signerName": "signerName indicates the requested signer.\n\nAll signer names beginning with `kubernetes.io` are reserved for use by the Kubernetes project. There is currently one well-known signer documented by the Kubernetes project, `kubernetes.io/kube-apiserver-client-pod`, which will issue client certificates understood by kube-apiserver. It is currently unimplemented.",
+ "podName": "podName is the name of the pod into which the certificate will be mounted.",
+ "podUID": "podUID is the UID of the pod into which the certificate will be mounted.",
+ "serviceAccountName": "serviceAccountName is the name of the service account the pod is running as.",
+ "serviceAccountUID": "serviceAccountUID is the UID of the service account the pod is running as.",
+ "nodeName": "nodeName is the name of the node the pod is assigned to.",
+ "nodeUID": "nodeUID is the UID of the node the pod is assigned to.",
+	"maxExpirationSeconds": "maxExpirationSeconds is the maximum lifetime permitted for the certificate.\n\nIf omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver will reject values shorter than 3600 (1 hour). The maximum allowable value is 7862400 (91 days).\n\nThe signer implementation is then free to issue a certificate with any lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 seconds (1 hour). This constraint is enforced by kube-apiserver. `kubernetes.io` signers will never issue certificates with a lifetime longer than 24 hours.",
+ "pkixPublicKey": "pkixPublicKey is the PKIX-serialized public key the signer will issue the certificate to.\n\nThe key must be one of RSA3072, RSA4096, ECDSAP256, ECDSAP384, ECDSAP521, or ED25519. Note that this list may be expanded in the future.\n\nSigner implementations do not need to support all key types supported by kube-apiserver and kubelet. If a signer does not support the key type used for a given PodCertificateRequest, it must deny the request by setting a status.conditions entry with a type of \"Denied\" and a reason of \"UnsupportedKeyType\". It may also suggest a key type that it does support in the message field.",
+	"proofOfPossession": "proofOfPossession proves that the requesting kubelet holds the private key corresponding to pkixPublicKey.\n\nIt is constructed by signing the ASCII bytes of the pod's UID using `pkixPublicKey`.\n\nkube-apiserver validates the proof of possession during creation of the PodCertificateRequest.\n\nIf the key is an RSA key, then the signature is over the ASCII bytes of the pod UID, using RSASSA-PSS from RFC 8017 (as implemented by the golang function crypto/rsa.SignPSS with nil options).\n\nIf the key is an ECDSA key, then the signature is as described by [SEC 1, Version 2.0](https://www.secg.org/sec1-v2.pdf) (as implemented by the golang library function crypto/ecdsa.SignASN1)\n\nIf the key is an ED25519 key, then the signature is as described by the [ED25519 Specification](https://ed25519.cr.yp.to/) (as implemented by the golang library crypto/ed25519.Sign).",
+}
+
+func (PodCertificateRequestSpec) SwaggerDoc() map[string]string {
+ return map_PodCertificateRequestSpec
+}
+
+var map_PodCertificateRequestStatus = map[string]string{
+ "": "PodCertificateRequestStatus describes the status of the request, and holds the certificate data if the request is issued.",
+ "conditions": "conditions applied to the request.\n\nThe types \"Issued\", \"Denied\", and \"Failed\" have special handling. At most one of these conditions may be present, and they must have status \"True\".\n\nIf the request is denied with `Reason=UnsupportedKeyType`, the signer may suggest a key type that will work in the message field.",
+ "certificateChain": "certificateChain is populated with an issued certificate by the signer. This field is set via the /status subresource. Once populated, this field is immutable.\n\nIf the certificate signing request is denied, a condition of type \"Denied\" is added and this field remains empty. If the signer cannot issue the certificate, a condition of type \"Failed\" is added and this field remains empty.\n\nValidation requirements:\n 1. certificateChain must consist of one or more PEM-formatted certificates.\n 2. Each entry must be a valid PEM-wrapped, DER-encoded ASN.1 Certificate as\n described in section 4 of RFC5280.\n\nIf more than one block is present, and the definition of the requested spec.signerName does not indicate otherwise, the first block is the issued certificate, and subsequent blocks should be treated as intermediate certificates and presented in TLS handshakes. When projecting the chain into a pod volume, kubelet will drop any data in-between the PEM blocks, as well as any PEM block headers.",
+ "notBefore": "notBefore is the time at which the certificate becomes valid. The value must be the same as the notBefore value in the leaf certificate in certificateChain. This field is set via the /status subresource. Once populated, it is immutable. The signer must set this field at the same time it sets certificateChain.",
+ "beginRefreshAt": "beginRefreshAt is the time at which the kubelet should begin trying to refresh the certificate. This field is set via the /status subresource, and must be set at the same time as certificateChain. Once populated, this field is immutable.\n\nThis field is only a hint. Kubelet may start refreshing before or after this time if necessary.",
+ "notAfter": "notAfter is the time at which the certificate expires. The value must be the same as the notAfter value in the leaf certificate in certificateChain. This field is set via the /status subresource. Once populated, it is immutable. The signer must set this field at the same time it sets certificateChain.",
+}
+
+func (PodCertificateRequestStatus) SwaggerDoc() map[string]string {
+ return map_PodCertificateRequestStatus
+}
+
// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go
index 30a4dc1e8..25bc0ed6c 100644
--- a/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go
@@ -22,6 +22,7 @@ limitations under the License.
package v1alpha1
import (
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
@@ -100,3 +101,130 @@ func (in *ClusterTrustBundleSpec) DeepCopy() *ClusterTrustBundleSpec {
in.DeepCopyInto(out)
return out
}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodCertificateRequest) DeepCopyInto(out *PodCertificateRequest) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequest.
+func (in *PodCertificateRequest) DeepCopy() *PodCertificateRequest {
+ if in == nil {
+ return nil
+ }
+ out := new(PodCertificateRequest)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodCertificateRequest) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodCertificateRequestList) DeepCopyInto(out *PodCertificateRequestList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]PodCertificateRequest, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequestList.
+func (in *PodCertificateRequestList) DeepCopy() *PodCertificateRequestList {
+ if in == nil {
+ return nil
+ }
+ out := new(PodCertificateRequestList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodCertificateRequestList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodCertificateRequestSpec) DeepCopyInto(out *PodCertificateRequestSpec) {
+ *out = *in
+ if in.MaxExpirationSeconds != nil {
+ in, out := &in.MaxExpirationSeconds, &out.MaxExpirationSeconds
+ *out = new(int32)
+ **out = **in
+ }
+ if in.PKIXPublicKey != nil {
+ in, out := &in.PKIXPublicKey, &out.PKIXPublicKey
+ *out = make([]byte, len(*in))
+ copy(*out, *in)
+ }
+ if in.ProofOfPossession != nil {
+ in, out := &in.ProofOfPossession, &out.ProofOfPossession
+ *out = make([]byte, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequestSpec.
+func (in *PodCertificateRequestSpec) DeepCopy() *PodCertificateRequestSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(PodCertificateRequestSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodCertificateRequestStatus) DeepCopyInto(out *PodCertificateRequestStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]v1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.NotBefore != nil {
+ in, out := &in.NotBefore, &out.NotBefore
+ *out = (*in).DeepCopy()
+ }
+ if in.BeginRefreshAt != nil {
+ in, out := &in.BeginRefreshAt, &out.BeginRefreshAt
+ *out = (*in).DeepCopy()
+ }
+ if in.NotAfter != nil {
+ in, out := &in.NotAfter, &out.NotAfter
+ *out = (*in).DeepCopy()
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequestStatus.
+func (in *PodCertificateRequestStatus) DeepCopy() *PodCertificateRequestStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(PodCertificateRequestStatus)
+ in.DeepCopyInto(out)
+ return out
+}
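
Editor's note (not part of the vendored sources): a short, hedged illustration of why the deepcopy functions above matter in practice. Objects handed out by shared caches and informers must not be mutated in place, so callers typically DeepCopy before editing; the helper below is illustrative only.

package main

import (
	"fmt"

	certsv1alpha1 "k8s.io/api/certificates/v1alpha1"
)

// withCertificateChain returns a modified copy, leaving the cached object untouched.
func withCertificateChain(cached *certsv1alpha1.PodCertificateRequest, chain string) *certsv1alpha1.PodCertificateRequest {
	updated := cached.DeepCopy() // copies spec, status, conditions, and byte slices
	updated.Status.CertificateChain = chain
	return updated
}

func main() {
	cached := &certsv1alpha1.PodCertificateRequest{}
	updated := withCertificateChain(cached, "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----")
	fmt.Println(cached.Status.CertificateChain == updated.Status.CertificateChain) // false
}
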
diff --git a/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go
index 3121a87d0..2749e4297 100644
--- a/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go
+++ b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go
@@ -56,3 +56,39 @@ func (in *ClusterTrustBundleList) APILifecycleDeprecated() (major, minor int) {
func (in *ClusterTrustBundleList) APILifecycleRemoved() (major, minor int) {
return 1, 37
}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *PodCertificateRequest) APILifecycleIntroduced() (major, minor int) {
+ return 1, 32
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *PodCertificateRequest) APILifecycleDeprecated() (major, minor int) {
+ return 1, 35
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *PodCertificateRequest) APILifecycleRemoved() (major, minor int) {
+ return 1, 38
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *PodCertificateRequestList) APILifecycleIntroduced() (major, minor int) {
+ return 1, 32
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *PodCertificateRequestList) APILifecycleDeprecated() (major, minor int) {
+ return 1, 35
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *PodCertificateRequestList) APILifecycleRemoved() (major, minor int) {
+ return 1, 38
+}
diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.proto b/vendor/k8s.io/api/certificates/v1beta1/generated.proto
index 7c48270f6..4c9385c19 100644
--- a/vendor/k8s.io/api/certificates/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/certificates/v1beta1/generated.proto
@@ -30,6 +30,8 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
option go_package = "k8s.io/api/certificates/v1beta1";
// Describes a certificate signing request
+// +k8s:supportsSubresource=/status
+// +k8s:supportsSubresource=/approval
message CertificateSigningRequest {
// +optional
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
@@ -182,6 +184,11 @@ message CertificateSigningRequestStatus {
// +listType=map
// +listMapKey=type
// +optional
+ // +k8s:listType=map
+ // +k8s:listMapKey=type
+ // +k8s:optional
+ // +k8s:item(type: "Approved")=+k8s:zeroOrOneOfMember
+ // +k8s:item(type: "Denied")=+k8s:zeroOrOneOfMember
repeated CertificateSigningRequestCondition conditions = 1;
// If request was approved, the controller will place the issued certificate here.
diff --git a/vendor/k8s.io/api/certificates/v1beta1/types.go b/vendor/k8s.io/api/certificates/v1beta1/types.go
index 1ce104807..fadb7e082 100644
--- a/vendor/k8s.io/api/certificates/v1beta1/types.go
+++ b/vendor/k8s.io/api/certificates/v1beta1/types.go
@@ -31,6 +31,8 @@ import (
// +k8s:prerelease-lifecycle-gen:replacement=certificates.k8s.io,v1,CertificateSigningRequest
// Describes a certificate signing request
+// +k8s:supportsSubresource=/status
+// +k8s:supportsSubresource=/approval
type CertificateSigningRequest struct {
metav1.TypeMeta `json:",inline"`
// +optional
@@ -175,6 +177,11 @@ type CertificateSigningRequestStatus struct {
// +listType=map
// +listMapKey=type
// +optional
+ // +k8s:listType=map
+ // +k8s:listMapKey=type
+ // +k8s:optional
+ // +k8s:item(type: "Approved")=+k8s:zeroOrOneOfMember
+ // +k8s:item(type: "Denied")=+k8s:zeroOrOneOfMember
Conditions []CertificateSigningRequestCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"`
// If request was approved, the controller will place the issued certificate here.
diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go
index a4b8f5842..2ece4c209 100644
--- a/vendor/k8s.io/api/core/v1/generated.pb.go
+++ b/vendor/k8s.io/api/core/v1/generated.pb.go
@@ -1729,10 +1729,38 @@ func (m *FCVolumeSource) XXX_DiscardUnknown() {
var xxx_messageInfo_FCVolumeSource proto.InternalMessageInfo
+func (m *FileKeySelector) Reset() { *m = FileKeySelector{} }
+func (*FileKeySelector) ProtoMessage() {}
+func (*FileKeySelector) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6c07b07c062484ab, []int{60}
+}
+func (m *FileKeySelector) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *FileKeySelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *FileKeySelector) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_FileKeySelector.Merge(m, src)
+}
+func (m *FileKeySelector) XXX_Size() int {
+ return m.Size()
+}
+func (m *FileKeySelector) XXX_DiscardUnknown() {
+ xxx_messageInfo_FileKeySelector.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FileKeySelector proto.InternalMessageInfo
+
func (m *FlexPersistentVolumeSource) Reset() { *m = FlexPersistentVolumeSource{} }
func (*FlexPersistentVolumeSource) ProtoMessage() {}
func (*FlexPersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{60}
+ return fileDescriptor_6c07b07c062484ab, []int{61}
}
func (m *FlexPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1760,7 +1788,7 @@ var xxx_messageInfo_FlexPersistentVolumeSource proto.InternalMessageInfo
func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} }
func (*FlexVolumeSource) ProtoMessage() {}
func (*FlexVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{61}
+ return fileDescriptor_6c07b07c062484ab, []int{62}
}
func (m *FlexVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1788,7 +1816,7 @@ var xxx_messageInfo_FlexVolumeSource proto.InternalMessageInfo
func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} }
func (*FlockerVolumeSource) ProtoMessage() {}
func (*FlockerVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{62}
+ return fileDescriptor_6c07b07c062484ab, []int{63}
}
func (m *FlockerVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1816,7 +1844,7 @@ var xxx_messageInfo_FlockerVolumeSource proto.InternalMessageInfo
func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} }
func (*GCEPersistentDiskVolumeSource) ProtoMessage() {}
func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{63}
+ return fileDescriptor_6c07b07c062484ab, []int{64}
}
func (m *GCEPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1844,7 +1872,7 @@ var xxx_messageInfo_GCEPersistentDiskVolumeSource proto.InternalMessageInfo
func (m *GRPCAction) Reset() { *m = GRPCAction{} }
func (*GRPCAction) ProtoMessage() {}
func (*GRPCAction) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{64}
+ return fileDescriptor_6c07b07c062484ab, []int{65}
}
func (m *GRPCAction) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1872,7 +1900,7 @@ var xxx_messageInfo_GRPCAction proto.InternalMessageInfo
func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} }
func (*GitRepoVolumeSource) ProtoMessage() {}
func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{65}
+ return fileDescriptor_6c07b07c062484ab, []int{66}
}
func (m *GitRepoVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1900,7 +1928,7 @@ var xxx_messageInfo_GitRepoVolumeSource proto.InternalMessageInfo
func (m *GlusterfsPersistentVolumeSource) Reset() { *m = GlusterfsPersistentVolumeSource{} }
func (*GlusterfsPersistentVolumeSource) ProtoMessage() {}
func (*GlusterfsPersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{66}
+ return fileDescriptor_6c07b07c062484ab, []int{67}
}
func (m *GlusterfsPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1928,7 +1956,7 @@ var xxx_messageInfo_GlusterfsPersistentVolumeSource proto.InternalMessageInfo
func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} }
func (*GlusterfsVolumeSource) ProtoMessage() {}
func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{67}
+ return fileDescriptor_6c07b07c062484ab, []int{68}
}
func (m *GlusterfsVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1956,7 +1984,7 @@ var xxx_messageInfo_GlusterfsVolumeSource proto.InternalMessageInfo
func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} }
func (*HTTPGetAction) ProtoMessage() {}
func (*HTTPGetAction) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{68}
+ return fileDescriptor_6c07b07c062484ab, []int{69}
}
func (m *HTTPGetAction) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1984,7 +2012,7 @@ var xxx_messageInfo_HTTPGetAction proto.InternalMessageInfo
func (m *HTTPHeader) Reset() { *m = HTTPHeader{} }
func (*HTTPHeader) ProtoMessage() {}
func (*HTTPHeader) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{69}
+ return fileDescriptor_6c07b07c062484ab, []int{70}
}
func (m *HTTPHeader) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2012,7 +2040,7 @@ var xxx_messageInfo_HTTPHeader proto.InternalMessageInfo
func (m *HostAlias) Reset() { *m = HostAlias{} }
func (*HostAlias) ProtoMessage() {}
func (*HostAlias) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{70}
+ return fileDescriptor_6c07b07c062484ab, []int{71}
}
func (m *HostAlias) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2040,7 +2068,7 @@ var xxx_messageInfo_HostAlias proto.InternalMessageInfo
func (m *HostIP) Reset() { *m = HostIP{} }
func (*HostIP) ProtoMessage() {}
func (*HostIP) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{71}
+ return fileDescriptor_6c07b07c062484ab, []int{72}
}
func (m *HostIP) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2068,7 +2096,7 @@ var xxx_messageInfo_HostIP proto.InternalMessageInfo
func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} }
func (*HostPathVolumeSource) ProtoMessage() {}
func (*HostPathVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{72}
+ return fileDescriptor_6c07b07c062484ab, []int{73}
}
func (m *HostPathVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2096,7 +2124,7 @@ var xxx_messageInfo_HostPathVolumeSource proto.InternalMessageInfo
func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} }
func (*ISCSIPersistentVolumeSource) ProtoMessage() {}
func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{73}
+ return fileDescriptor_6c07b07c062484ab, []int{74}
}
func (m *ISCSIPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2124,7 +2152,7 @@ var xxx_messageInfo_ISCSIPersistentVolumeSource proto.InternalMessageInfo
func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} }
func (*ISCSIVolumeSource) ProtoMessage() {}
func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{74}
+ return fileDescriptor_6c07b07c062484ab, []int{75}
}
func (m *ISCSIVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2152,7 +2180,7 @@ var xxx_messageInfo_ISCSIVolumeSource proto.InternalMessageInfo
func (m *ImageVolumeSource) Reset() { *m = ImageVolumeSource{} }
func (*ImageVolumeSource) ProtoMessage() {}
func (*ImageVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{75}
+ return fileDescriptor_6c07b07c062484ab, []int{76}
}
func (m *ImageVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2180,7 +2208,7 @@ var xxx_messageInfo_ImageVolumeSource proto.InternalMessageInfo
func (m *KeyToPath) Reset() { *m = KeyToPath{} }
func (*KeyToPath) ProtoMessage() {}
func (*KeyToPath) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{76}
+ return fileDescriptor_6c07b07c062484ab, []int{77}
}
func (m *KeyToPath) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2208,7 +2236,7 @@ var xxx_messageInfo_KeyToPath proto.InternalMessageInfo
func (m *Lifecycle) Reset() { *m = Lifecycle{} }
func (*Lifecycle) ProtoMessage() {}
func (*Lifecycle) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{77}
+ return fileDescriptor_6c07b07c062484ab, []int{78}
}
func (m *Lifecycle) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2236,7 +2264,7 @@ var xxx_messageInfo_Lifecycle proto.InternalMessageInfo
func (m *LifecycleHandler) Reset() { *m = LifecycleHandler{} }
func (*LifecycleHandler) ProtoMessage() {}
func (*LifecycleHandler) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{78}
+ return fileDescriptor_6c07b07c062484ab, []int{79}
}
func (m *LifecycleHandler) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2264,7 +2292,7 @@ var xxx_messageInfo_LifecycleHandler proto.InternalMessageInfo
func (m *LimitRange) Reset() { *m = LimitRange{} }
func (*LimitRange) ProtoMessage() {}
func (*LimitRange) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{79}
+ return fileDescriptor_6c07b07c062484ab, []int{80}
}
func (m *LimitRange) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2292,7 +2320,7 @@ var xxx_messageInfo_LimitRange proto.InternalMessageInfo
func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} }
func (*LimitRangeItem) ProtoMessage() {}
func (*LimitRangeItem) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{80}
+ return fileDescriptor_6c07b07c062484ab, []int{81}
}
func (m *LimitRangeItem) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2320,7 +2348,7 @@ var xxx_messageInfo_LimitRangeItem proto.InternalMessageInfo
func (m *LimitRangeList) Reset() { *m = LimitRangeList{} }
func (*LimitRangeList) ProtoMessage() {}
func (*LimitRangeList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{81}
+ return fileDescriptor_6c07b07c062484ab, []int{82}
}
func (m *LimitRangeList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2348,7 +2376,7 @@ var xxx_messageInfo_LimitRangeList proto.InternalMessageInfo
func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} }
func (*LimitRangeSpec) ProtoMessage() {}
func (*LimitRangeSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{82}
+ return fileDescriptor_6c07b07c062484ab, []int{83}
}
func (m *LimitRangeSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2376,7 +2404,7 @@ var xxx_messageInfo_LimitRangeSpec proto.InternalMessageInfo
func (m *LinuxContainerUser) Reset() { *m = LinuxContainerUser{} }
func (*LinuxContainerUser) ProtoMessage() {}
func (*LinuxContainerUser) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{83}
+ return fileDescriptor_6c07b07c062484ab, []int{84}
}
func (m *LinuxContainerUser) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2404,7 +2432,7 @@ var xxx_messageInfo_LinuxContainerUser proto.InternalMessageInfo
func (m *List) Reset() { *m = List{} }
func (*List) ProtoMessage() {}
func (*List) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{84}
+ return fileDescriptor_6c07b07c062484ab, []int{85}
}
func (m *List) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2432,7 +2460,7 @@ var xxx_messageInfo_List proto.InternalMessageInfo
func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} }
func (*LoadBalancerIngress) ProtoMessage() {}
func (*LoadBalancerIngress) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{85}
+ return fileDescriptor_6c07b07c062484ab, []int{86}
}
func (m *LoadBalancerIngress) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2460,7 +2488,7 @@ var xxx_messageInfo_LoadBalancerIngress proto.InternalMessageInfo
func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} }
func (*LoadBalancerStatus) ProtoMessage() {}
func (*LoadBalancerStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{86}
+ return fileDescriptor_6c07b07c062484ab, []int{87}
}
func (m *LoadBalancerStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2488,7 +2516,7 @@ var xxx_messageInfo_LoadBalancerStatus proto.InternalMessageInfo
func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} }
func (*LocalObjectReference) ProtoMessage() {}
func (*LocalObjectReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{87}
+ return fileDescriptor_6c07b07c062484ab, []int{88}
}
func (m *LocalObjectReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2516,7 +2544,7 @@ var xxx_messageInfo_LocalObjectReference proto.InternalMessageInfo
func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} }
func (*LocalVolumeSource) ProtoMessage() {}
func (*LocalVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{88}
+ return fileDescriptor_6c07b07c062484ab, []int{89}
}
func (m *LocalVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2544,7 +2572,7 @@ var xxx_messageInfo_LocalVolumeSource proto.InternalMessageInfo
func (m *ModifyVolumeStatus) Reset() { *m = ModifyVolumeStatus{} }
func (*ModifyVolumeStatus) ProtoMessage() {}
func (*ModifyVolumeStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{89}
+ return fileDescriptor_6c07b07c062484ab, []int{90}
}
func (m *ModifyVolumeStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2572,7 +2600,7 @@ var xxx_messageInfo_ModifyVolumeStatus proto.InternalMessageInfo
func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} }
func (*NFSVolumeSource) ProtoMessage() {}
func (*NFSVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{90}
+ return fileDescriptor_6c07b07c062484ab, []int{91}
}
func (m *NFSVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2600,7 +2628,7 @@ var xxx_messageInfo_NFSVolumeSource proto.InternalMessageInfo
func (m *Namespace) Reset() { *m = Namespace{} }
func (*Namespace) ProtoMessage() {}
func (*Namespace) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{91}
+ return fileDescriptor_6c07b07c062484ab, []int{92}
}
func (m *Namespace) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2628,7 +2656,7 @@ var xxx_messageInfo_Namespace proto.InternalMessageInfo
func (m *NamespaceCondition) Reset() { *m = NamespaceCondition{} }
func (*NamespaceCondition) ProtoMessage() {}
func (*NamespaceCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{92}
+ return fileDescriptor_6c07b07c062484ab, []int{93}
}
func (m *NamespaceCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2656,7 +2684,7 @@ var xxx_messageInfo_NamespaceCondition proto.InternalMessageInfo
func (m *NamespaceList) Reset() { *m = NamespaceList{} }
func (*NamespaceList) ProtoMessage() {}
func (*NamespaceList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{93}
+ return fileDescriptor_6c07b07c062484ab, []int{94}
}
func (m *NamespaceList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2684,7 +2712,7 @@ var xxx_messageInfo_NamespaceList proto.InternalMessageInfo
func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} }
func (*NamespaceSpec) ProtoMessage() {}
func (*NamespaceSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{94}
+ return fileDescriptor_6c07b07c062484ab, []int{95}
}
func (m *NamespaceSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2712,7 +2740,7 @@ var xxx_messageInfo_NamespaceSpec proto.InternalMessageInfo
func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} }
func (*NamespaceStatus) ProtoMessage() {}
func (*NamespaceStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{95}
+ return fileDescriptor_6c07b07c062484ab, []int{96}
}
func (m *NamespaceStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2740,7 +2768,7 @@ var xxx_messageInfo_NamespaceStatus proto.InternalMessageInfo
func (m *Node) Reset() { *m = Node{} }
func (*Node) ProtoMessage() {}
func (*Node) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{96}
+ return fileDescriptor_6c07b07c062484ab, []int{97}
}
func (m *Node) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2768,7 +2796,7 @@ var xxx_messageInfo_Node proto.InternalMessageInfo
func (m *NodeAddress) Reset() { *m = NodeAddress{} }
func (*NodeAddress) ProtoMessage() {}
func (*NodeAddress) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{97}
+ return fileDescriptor_6c07b07c062484ab, []int{98}
}
func (m *NodeAddress) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2796,7 +2824,7 @@ var xxx_messageInfo_NodeAddress proto.InternalMessageInfo
func (m *NodeAffinity) Reset() { *m = NodeAffinity{} }
func (*NodeAffinity) ProtoMessage() {}
func (*NodeAffinity) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{98}
+ return fileDescriptor_6c07b07c062484ab, []int{99}
}
func (m *NodeAffinity) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2824,7 +2852,7 @@ var xxx_messageInfo_NodeAffinity proto.InternalMessageInfo
func (m *NodeCondition) Reset() { *m = NodeCondition{} }
func (*NodeCondition) ProtoMessage() {}
func (*NodeCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{99}
+ return fileDescriptor_6c07b07c062484ab, []int{100}
}
func (m *NodeCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2852,7 +2880,7 @@ var xxx_messageInfo_NodeCondition proto.InternalMessageInfo
func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} }
func (*NodeConfigSource) ProtoMessage() {}
func (*NodeConfigSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{100}
+ return fileDescriptor_6c07b07c062484ab, []int{101}
}
func (m *NodeConfigSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2880,7 +2908,7 @@ var xxx_messageInfo_NodeConfigSource proto.InternalMessageInfo
func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} }
func (*NodeConfigStatus) ProtoMessage() {}
func (*NodeConfigStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{101}
+ return fileDescriptor_6c07b07c062484ab, []int{102}
}
func (m *NodeConfigStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2908,7 +2936,7 @@ var xxx_messageInfo_NodeConfigStatus proto.InternalMessageInfo
func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} }
func (*NodeDaemonEndpoints) ProtoMessage() {}
func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{102}
+ return fileDescriptor_6c07b07c062484ab, []int{103}
}
func (m *NodeDaemonEndpoints) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2936,7 +2964,7 @@ var xxx_messageInfo_NodeDaemonEndpoints proto.InternalMessageInfo
func (m *NodeFeatures) Reset() { *m = NodeFeatures{} }
func (*NodeFeatures) ProtoMessage() {}
func (*NodeFeatures) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{103}
+ return fileDescriptor_6c07b07c062484ab, []int{104}
}
func (m *NodeFeatures) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2964,7 +2992,7 @@ var xxx_messageInfo_NodeFeatures proto.InternalMessageInfo
func (m *NodeList) Reset() { *m = NodeList{} }
func (*NodeList) ProtoMessage() {}
func (*NodeList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{104}
+ return fileDescriptor_6c07b07c062484ab, []int{105}
}
func (m *NodeList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2992,7 +3020,7 @@ var xxx_messageInfo_NodeList proto.InternalMessageInfo
func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} }
func (*NodeProxyOptions) ProtoMessage() {}
func (*NodeProxyOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{105}
+ return fileDescriptor_6c07b07c062484ab, []int{106}
}
func (m *NodeProxyOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3020,7 +3048,7 @@ var xxx_messageInfo_NodeProxyOptions proto.InternalMessageInfo
func (m *NodeRuntimeHandler) Reset() { *m = NodeRuntimeHandler{} }
func (*NodeRuntimeHandler) ProtoMessage() {}
func (*NodeRuntimeHandler) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{106}
+ return fileDescriptor_6c07b07c062484ab, []int{107}
}
func (m *NodeRuntimeHandler) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3048,7 +3076,7 @@ var xxx_messageInfo_NodeRuntimeHandler proto.InternalMessageInfo
func (m *NodeRuntimeHandlerFeatures) Reset() { *m = NodeRuntimeHandlerFeatures{} }
func (*NodeRuntimeHandlerFeatures) ProtoMessage() {}
func (*NodeRuntimeHandlerFeatures) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{107}
+ return fileDescriptor_6c07b07c062484ab, []int{108}
}
func (m *NodeRuntimeHandlerFeatures) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3076,7 +3104,7 @@ var xxx_messageInfo_NodeRuntimeHandlerFeatures proto.InternalMessageInfo
func (m *NodeSelector) Reset() { *m = NodeSelector{} }
func (*NodeSelector) ProtoMessage() {}
func (*NodeSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{108}
+ return fileDescriptor_6c07b07c062484ab, []int{109}
}
func (m *NodeSelector) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3104,7 +3132,7 @@ var xxx_messageInfo_NodeSelector proto.InternalMessageInfo
func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} }
func (*NodeSelectorRequirement) ProtoMessage() {}
func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{109}
+ return fileDescriptor_6c07b07c062484ab, []int{110}
}
func (m *NodeSelectorRequirement) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3132,7 +3160,7 @@ var xxx_messageInfo_NodeSelectorRequirement proto.InternalMessageInfo
func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} }
func (*NodeSelectorTerm) ProtoMessage() {}
func (*NodeSelectorTerm) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{110}
+ return fileDescriptor_6c07b07c062484ab, []int{111}
}
func (m *NodeSelectorTerm) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3160,7 +3188,7 @@ var xxx_messageInfo_NodeSelectorTerm proto.InternalMessageInfo
func (m *NodeSpec) Reset() { *m = NodeSpec{} }
func (*NodeSpec) ProtoMessage() {}
func (*NodeSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{111}
+ return fileDescriptor_6c07b07c062484ab, []int{112}
}
func (m *NodeSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3188,7 +3216,7 @@ var xxx_messageInfo_NodeSpec proto.InternalMessageInfo
func (m *NodeStatus) Reset() { *m = NodeStatus{} }
func (*NodeStatus) ProtoMessage() {}
func (*NodeStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{112}
+ return fileDescriptor_6c07b07c062484ab, []int{113}
}
func (m *NodeStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3216,7 +3244,7 @@ var xxx_messageInfo_NodeStatus proto.InternalMessageInfo
func (m *NodeSwapStatus) Reset() { *m = NodeSwapStatus{} }
func (*NodeSwapStatus) ProtoMessage() {}
func (*NodeSwapStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{113}
+ return fileDescriptor_6c07b07c062484ab, []int{114}
}
func (m *NodeSwapStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3244,7 +3272,7 @@ var xxx_messageInfo_NodeSwapStatus proto.InternalMessageInfo
func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} }
func (*NodeSystemInfo) ProtoMessage() {}
func (*NodeSystemInfo) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{114}
+ return fileDescriptor_6c07b07c062484ab, []int{115}
}
func (m *NodeSystemInfo) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3272,7 +3300,7 @@ var xxx_messageInfo_NodeSystemInfo proto.InternalMessageInfo
func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} }
func (*ObjectFieldSelector) ProtoMessage() {}
func (*ObjectFieldSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{115}
+ return fileDescriptor_6c07b07c062484ab, []int{116}
}
func (m *ObjectFieldSelector) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3300,7 +3328,7 @@ var xxx_messageInfo_ObjectFieldSelector proto.InternalMessageInfo
func (m *ObjectReference) Reset() { *m = ObjectReference{} }
func (*ObjectReference) ProtoMessage() {}
func (*ObjectReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{116}
+ return fileDescriptor_6c07b07c062484ab, []int{117}
}
func (m *ObjectReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3328,7 +3356,7 @@ var xxx_messageInfo_ObjectReference proto.InternalMessageInfo
func (m *PersistentVolume) Reset() { *m = PersistentVolume{} }
func (*PersistentVolume) ProtoMessage() {}
func (*PersistentVolume) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{117}
+ return fileDescriptor_6c07b07c062484ab, []int{118}
}
func (m *PersistentVolume) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3356,7 +3384,7 @@ var xxx_messageInfo_PersistentVolume proto.InternalMessageInfo
func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} }
func (*PersistentVolumeClaim) ProtoMessage() {}
func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{118}
+ return fileDescriptor_6c07b07c062484ab, []int{119}
}
func (m *PersistentVolumeClaim) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3384,7 +3412,7 @@ var xxx_messageInfo_PersistentVolumeClaim proto.InternalMessageInfo
func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} }
func (*PersistentVolumeClaimCondition) ProtoMessage() {}
func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{119}
+ return fileDescriptor_6c07b07c062484ab, []int{120}
}
func (m *PersistentVolumeClaimCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3412,7 +3440,7 @@ var xxx_messageInfo_PersistentVolumeClaimCondition proto.InternalMessageInfo
func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} }
func (*PersistentVolumeClaimList) ProtoMessage() {}
func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{120}
+ return fileDescriptor_6c07b07c062484ab, []int{121}
}
func (m *PersistentVolumeClaimList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3440,7 +3468,7 @@ var xxx_messageInfo_PersistentVolumeClaimList proto.InternalMessageInfo
func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} }
func (*PersistentVolumeClaimSpec) ProtoMessage() {}
func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{121}
+ return fileDescriptor_6c07b07c062484ab, []int{122}
}
func (m *PersistentVolumeClaimSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3468,7 +3496,7 @@ var xxx_messageInfo_PersistentVolumeClaimSpec proto.InternalMessageInfo
func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} }
func (*PersistentVolumeClaimStatus) ProtoMessage() {}
func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{122}
+ return fileDescriptor_6c07b07c062484ab, []int{123}
}
func (m *PersistentVolumeClaimStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3496,7 +3524,7 @@ var xxx_messageInfo_PersistentVolumeClaimStatus proto.InternalMessageInfo
func (m *PersistentVolumeClaimTemplate) Reset() { *m = PersistentVolumeClaimTemplate{} }
func (*PersistentVolumeClaimTemplate) ProtoMessage() {}
func (*PersistentVolumeClaimTemplate) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{123}
+ return fileDescriptor_6c07b07c062484ab, []int{124}
}
func (m *PersistentVolumeClaimTemplate) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3524,7 +3552,7 @@ var xxx_messageInfo_PersistentVolumeClaimTemplate proto.InternalMessageInfo
func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} }
func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {}
func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{124}
+ return fileDescriptor_6c07b07c062484ab, []int{125}
}
func (m *PersistentVolumeClaimVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3552,7 +3580,7 @@ var xxx_messageInfo_PersistentVolumeClaimVolumeSource proto.InternalMessageInfo
func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} }
func (*PersistentVolumeList) ProtoMessage() {}
func (*PersistentVolumeList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{125}
+ return fileDescriptor_6c07b07c062484ab, []int{126}
}
func (m *PersistentVolumeList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3580,7 +3608,7 @@ var xxx_messageInfo_PersistentVolumeList proto.InternalMessageInfo
func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} }
func (*PersistentVolumeSource) ProtoMessage() {}
func (*PersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{126}
+ return fileDescriptor_6c07b07c062484ab, []int{127}
}
func (m *PersistentVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3608,7 +3636,7 @@ var xxx_messageInfo_PersistentVolumeSource proto.InternalMessageInfo
func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} }
func (*PersistentVolumeSpec) ProtoMessage() {}
func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{127}
+ return fileDescriptor_6c07b07c062484ab, []int{128}
}
func (m *PersistentVolumeSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3636,7 +3664,7 @@ var xxx_messageInfo_PersistentVolumeSpec proto.InternalMessageInfo
func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} }
func (*PersistentVolumeStatus) ProtoMessage() {}
func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{128}
+ return fileDescriptor_6c07b07c062484ab, []int{129}
}
func (m *PersistentVolumeStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3664,7 +3692,7 @@ var xxx_messageInfo_PersistentVolumeStatus proto.InternalMessageInfo
func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} }
func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {}
func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{129}
+ return fileDescriptor_6c07b07c062484ab, []int{130}
}
func (m *PhotonPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3692,7 +3720,7 @@ var xxx_messageInfo_PhotonPersistentDiskVolumeSource proto.InternalMessageInfo
func (m *Pod) Reset() { *m = Pod{} }
func (*Pod) ProtoMessage() {}
func (*Pod) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{130}
+ return fileDescriptor_6c07b07c062484ab, []int{131}
}
func (m *Pod) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3720,7 +3748,7 @@ var xxx_messageInfo_Pod proto.InternalMessageInfo
func (m *PodAffinity) Reset() { *m = PodAffinity{} }
func (*PodAffinity) ProtoMessage() {}
func (*PodAffinity) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{131}
+ return fileDescriptor_6c07b07c062484ab, []int{132}
}
func (m *PodAffinity) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3748,7 +3776,7 @@ var xxx_messageInfo_PodAffinity proto.InternalMessageInfo
func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} }
func (*PodAffinityTerm) ProtoMessage() {}
func (*PodAffinityTerm) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{132}
+ return fileDescriptor_6c07b07c062484ab, []int{133}
}
func (m *PodAffinityTerm) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3776,7 +3804,7 @@ var xxx_messageInfo_PodAffinityTerm proto.InternalMessageInfo
func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} }
func (*PodAntiAffinity) ProtoMessage() {}
func (*PodAntiAffinity) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{133}
+ return fileDescriptor_6c07b07c062484ab, []int{134}
}
func (m *PodAntiAffinity) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3804,7 +3832,7 @@ var xxx_messageInfo_PodAntiAffinity proto.InternalMessageInfo
func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} }
func (*PodAttachOptions) ProtoMessage() {}
func (*PodAttachOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{134}
+ return fileDescriptor_6c07b07c062484ab, []int{135}
}
func (m *PodAttachOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3829,10 +3857,38 @@ func (m *PodAttachOptions) XXX_DiscardUnknown() {
var xxx_messageInfo_PodAttachOptions proto.InternalMessageInfo
+func (m *PodCertificateProjection) Reset() { *m = PodCertificateProjection{} }
+func (*PodCertificateProjection) ProtoMessage() {}
+func (*PodCertificateProjection) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6c07b07c062484ab, []int{136}
+}
+func (m *PodCertificateProjection) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *PodCertificateProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *PodCertificateProjection) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PodCertificateProjection.Merge(m, src)
+}
+func (m *PodCertificateProjection) XXX_Size() int {
+ return m.Size()
+}
+func (m *PodCertificateProjection) XXX_DiscardUnknown() {
+ xxx_messageInfo_PodCertificateProjection.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodCertificateProjection proto.InternalMessageInfo
+
func (m *PodCondition) Reset() { *m = PodCondition{} }
func (*PodCondition) ProtoMessage() {}
func (*PodCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{135}
+ return fileDescriptor_6c07b07c062484ab, []int{137}
}
func (m *PodCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3860,7 +3916,7 @@ var xxx_messageInfo_PodCondition proto.InternalMessageInfo
func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} }
func (*PodDNSConfig) ProtoMessage() {}
func (*PodDNSConfig) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{136}
+ return fileDescriptor_6c07b07c062484ab, []int{138}
}
func (m *PodDNSConfig) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3888,7 +3944,7 @@ var xxx_messageInfo_PodDNSConfig proto.InternalMessageInfo
func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} }
func (*PodDNSConfigOption) ProtoMessage() {}
func (*PodDNSConfigOption) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{137}
+ return fileDescriptor_6c07b07c062484ab, []int{139}
}
func (m *PodDNSConfigOption) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3916,7 +3972,7 @@ var xxx_messageInfo_PodDNSConfigOption proto.InternalMessageInfo
func (m *PodExecOptions) Reset() { *m = PodExecOptions{} }
func (*PodExecOptions) ProtoMessage() {}
func (*PodExecOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{138}
+ return fileDescriptor_6c07b07c062484ab, []int{140}
}
func (m *PodExecOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3944,7 +4000,7 @@ var xxx_messageInfo_PodExecOptions proto.InternalMessageInfo
func (m *PodIP) Reset() { *m = PodIP{} }
func (*PodIP) ProtoMessage() {}
func (*PodIP) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{139}
+ return fileDescriptor_6c07b07c062484ab, []int{141}
}
func (m *PodIP) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3972,7 +4028,7 @@ var xxx_messageInfo_PodIP proto.InternalMessageInfo
func (m *PodList) Reset() { *m = PodList{} }
func (*PodList) ProtoMessage() {}
func (*PodList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{140}
+ return fileDescriptor_6c07b07c062484ab, []int{142}
}
func (m *PodList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4000,7 +4056,7 @@ var xxx_messageInfo_PodList proto.InternalMessageInfo
func (m *PodLogOptions) Reset() { *m = PodLogOptions{} }
func (*PodLogOptions) ProtoMessage() {}
func (*PodLogOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{141}
+ return fileDescriptor_6c07b07c062484ab, []int{143}
}
func (m *PodLogOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4028,7 +4084,7 @@ var xxx_messageInfo_PodLogOptions proto.InternalMessageInfo
func (m *PodOS) Reset() { *m = PodOS{} }
func (*PodOS) ProtoMessage() {}
func (*PodOS) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{142}
+ return fileDescriptor_6c07b07c062484ab, []int{144}
}
func (m *PodOS) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4056,7 +4112,7 @@ var xxx_messageInfo_PodOS proto.InternalMessageInfo
func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} }
func (*PodPortForwardOptions) ProtoMessage() {}
func (*PodPortForwardOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{143}
+ return fileDescriptor_6c07b07c062484ab, []int{145}
}
func (m *PodPortForwardOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4084,7 +4140,7 @@ var xxx_messageInfo_PodPortForwardOptions proto.InternalMessageInfo
func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} }
func (*PodProxyOptions) ProtoMessage() {}
func (*PodProxyOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{144}
+ return fileDescriptor_6c07b07c062484ab, []int{146}
}
func (m *PodProxyOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4112,7 +4168,7 @@ var xxx_messageInfo_PodProxyOptions proto.InternalMessageInfo
func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} }
func (*PodReadinessGate) ProtoMessage() {}
func (*PodReadinessGate) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{145}
+ return fileDescriptor_6c07b07c062484ab, []int{147}
}
func (m *PodReadinessGate) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4140,7 +4196,7 @@ var xxx_messageInfo_PodReadinessGate proto.InternalMessageInfo
func (m *PodResourceClaim) Reset() { *m = PodResourceClaim{} }
func (*PodResourceClaim) ProtoMessage() {}
func (*PodResourceClaim) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{146}
+ return fileDescriptor_6c07b07c062484ab, []int{148}
}
func (m *PodResourceClaim) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4168,7 +4224,7 @@ var xxx_messageInfo_PodResourceClaim proto.InternalMessageInfo
func (m *PodResourceClaimStatus) Reset() { *m = PodResourceClaimStatus{} }
func (*PodResourceClaimStatus) ProtoMessage() {}
func (*PodResourceClaimStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{147}
+ return fileDescriptor_6c07b07c062484ab, []int{149}
}
func (m *PodResourceClaimStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4196,7 +4252,7 @@ var xxx_messageInfo_PodResourceClaimStatus proto.InternalMessageInfo
func (m *PodSchedulingGate) Reset() { *m = PodSchedulingGate{} }
func (*PodSchedulingGate) ProtoMessage() {}
func (*PodSchedulingGate) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{148}
+ return fileDescriptor_6c07b07c062484ab, []int{150}
}
func (m *PodSchedulingGate) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4224,7 +4280,7 @@ var xxx_messageInfo_PodSchedulingGate proto.InternalMessageInfo
func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} }
func (*PodSecurityContext) ProtoMessage() {}
func (*PodSecurityContext) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{149}
+ return fileDescriptor_6c07b07c062484ab, []int{151}
}
func (m *PodSecurityContext) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4252,7 +4308,7 @@ var xxx_messageInfo_PodSecurityContext proto.InternalMessageInfo
func (m *PodSignature) Reset() { *m = PodSignature{} }
func (*PodSignature) ProtoMessage() {}
func (*PodSignature) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{150}
+ return fileDescriptor_6c07b07c062484ab, []int{152}
}
func (m *PodSignature) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4280,7 +4336,7 @@ var xxx_messageInfo_PodSignature proto.InternalMessageInfo
func (m *PodSpec) Reset() { *m = PodSpec{} }
func (*PodSpec) ProtoMessage() {}
func (*PodSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{151}
+ return fileDescriptor_6c07b07c062484ab, []int{153}
}
func (m *PodSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4308,7 +4364,7 @@ var xxx_messageInfo_PodSpec proto.InternalMessageInfo
func (m *PodStatus) Reset() { *m = PodStatus{} }
func (*PodStatus) ProtoMessage() {}
func (*PodStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{152}
+ return fileDescriptor_6c07b07c062484ab, []int{154}
}
func (m *PodStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4336,7 +4392,7 @@ var xxx_messageInfo_PodStatus proto.InternalMessageInfo
func (m *PodStatusResult) Reset() { *m = PodStatusResult{} }
func (*PodStatusResult) ProtoMessage() {}
func (*PodStatusResult) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{153}
+ return fileDescriptor_6c07b07c062484ab, []int{155}
}
func (m *PodStatusResult) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4364,7 +4420,7 @@ var xxx_messageInfo_PodStatusResult proto.InternalMessageInfo
func (m *PodTemplate) Reset() { *m = PodTemplate{} }
func (*PodTemplate) ProtoMessage() {}
func (*PodTemplate) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{154}
+ return fileDescriptor_6c07b07c062484ab, []int{156}
}
func (m *PodTemplate) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4392,7 +4448,7 @@ var xxx_messageInfo_PodTemplate proto.InternalMessageInfo
func (m *PodTemplateList) Reset() { *m = PodTemplateList{} }
func (*PodTemplateList) ProtoMessage() {}
func (*PodTemplateList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{155}
+ return fileDescriptor_6c07b07c062484ab, []int{157}
}
func (m *PodTemplateList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4420,7 +4476,7 @@ var xxx_messageInfo_PodTemplateList proto.InternalMessageInfo
func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} }
func (*PodTemplateSpec) ProtoMessage() {}
func (*PodTemplateSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{156}
+ return fileDescriptor_6c07b07c062484ab, []int{158}
}
func (m *PodTemplateSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4448,7 +4504,7 @@ var xxx_messageInfo_PodTemplateSpec proto.InternalMessageInfo
func (m *PortStatus) Reset() { *m = PortStatus{} }
func (*PortStatus) ProtoMessage() {}
func (*PortStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{157}
+ return fileDescriptor_6c07b07c062484ab, []int{159}
}
func (m *PortStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4476,7 +4532,7 @@ var xxx_messageInfo_PortStatus proto.InternalMessageInfo
func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} }
func (*PortworxVolumeSource) ProtoMessage() {}
func (*PortworxVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{158}
+ return fileDescriptor_6c07b07c062484ab, []int{160}
}
func (m *PortworxVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4504,7 +4560,7 @@ var xxx_messageInfo_PortworxVolumeSource proto.InternalMessageInfo
func (m *Preconditions) Reset() { *m = Preconditions{} }
func (*Preconditions) ProtoMessage() {}
func (*Preconditions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{159}
+ return fileDescriptor_6c07b07c062484ab, []int{161}
}
func (m *Preconditions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4532,7 +4588,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo
func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} }
func (*PreferAvoidPodsEntry) ProtoMessage() {}
func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{160}
+ return fileDescriptor_6c07b07c062484ab, []int{162}
}
func (m *PreferAvoidPodsEntry) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4560,7 +4616,7 @@ var xxx_messageInfo_PreferAvoidPodsEntry proto.InternalMessageInfo
func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} }
func (*PreferredSchedulingTerm) ProtoMessage() {}
func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{161}
+ return fileDescriptor_6c07b07c062484ab, []int{163}
}
func (m *PreferredSchedulingTerm) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4588,7 +4644,7 @@ var xxx_messageInfo_PreferredSchedulingTerm proto.InternalMessageInfo
func (m *Probe) Reset() { *m = Probe{} }
func (*Probe) ProtoMessage() {}
func (*Probe) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{162}
+ return fileDescriptor_6c07b07c062484ab, []int{164}
}
func (m *Probe) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4616,7 +4672,7 @@ var xxx_messageInfo_Probe proto.InternalMessageInfo
func (m *ProbeHandler) Reset() { *m = ProbeHandler{} }
func (*ProbeHandler) ProtoMessage() {}
func (*ProbeHandler) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{163}
+ return fileDescriptor_6c07b07c062484ab, []int{165}
}
func (m *ProbeHandler) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4644,7 +4700,7 @@ var xxx_messageInfo_ProbeHandler proto.InternalMessageInfo
func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} }
func (*ProjectedVolumeSource) ProtoMessage() {}
func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{164}
+ return fileDescriptor_6c07b07c062484ab, []int{166}
}
func (m *ProjectedVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4672,7 +4728,7 @@ var xxx_messageInfo_ProjectedVolumeSource proto.InternalMessageInfo
func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} }
func (*QuobyteVolumeSource) ProtoMessage() {}
func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{165}
+ return fileDescriptor_6c07b07c062484ab, []int{167}
}
func (m *QuobyteVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4700,7 +4756,7 @@ var xxx_messageInfo_QuobyteVolumeSource proto.InternalMessageInfo
func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} }
func (*RBDPersistentVolumeSource) ProtoMessage() {}
func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{166}
+ return fileDescriptor_6c07b07c062484ab, []int{168}
}
func (m *RBDPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4728,7 +4784,7 @@ var xxx_messageInfo_RBDPersistentVolumeSource proto.InternalMessageInfo
func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} }
func (*RBDVolumeSource) ProtoMessage() {}
func (*RBDVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{167}
+ return fileDescriptor_6c07b07c062484ab, []int{169}
}
func (m *RBDVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4756,7 +4812,7 @@ var xxx_messageInfo_RBDVolumeSource proto.InternalMessageInfo
func (m *RangeAllocation) Reset() { *m = RangeAllocation{} }
func (*RangeAllocation) ProtoMessage() {}
func (*RangeAllocation) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{168}
+ return fileDescriptor_6c07b07c062484ab, []int{170}
}
func (m *RangeAllocation) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4784,7 +4840,7 @@ var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo
func (m *ReplicationController) Reset() { *m = ReplicationController{} }
func (*ReplicationController) ProtoMessage() {}
func (*ReplicationController) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{169}
+ return fileDescriptor_6c07b07c062484ab, []int{171}
}
func (m *ReplicationController) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4812,7 +4868,7 @@ var xxx_messageInfo_ReplicationController proto.InternalMessageInfo
func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} }
func (*ReplicationControllerCondition) ProtoMessage() {}
func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{170}
+ return fileDescriptor_6c07b07c062484ab, []int{172}
}
func (m *ReplicationControllerCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4840,7 +4896,7 @@ var xxx_messageInfo_ReplicationControllerCondition proto.InternalMessageInfo
func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} }
func (*ReplicationControllerList) ProtoMessage() {}
func (*ReplicationControllerList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{171}
+ return fileDescriptor_6c07b07c062484ab, []int{173}
}
func (m *ReplicationControllerList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4868,7 +4924,7 @@ var xxx_messageInfo_ReplicationControllerList proto.InternalMessageInfo
func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} }
func (*ReplicationControllerSpec) ProtoMessage() {}
func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{172}
+ return fileDescriptor_6c07b07c062484ab, []int{174}
}
func (m *ReplicationControllerSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4896,7 +4952,7 @@ var xxx_messageInfo_ReplicationControllerSpec proto.InternalMessageInfo
func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} }
func (*ReplicationControllerStatus) ProtoMessage() {}
func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{173}
+ return fileDescriptor_6c07b07c062484ab, []int{175}
}
func (m *ReplicationControllerStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4924,7 +4980,7 @@ var xxx_messageInfo_ReplicationControllerStatus proto.InternalMessageInfo
func (m *ResourceClaim) Reset() { *m = ResourceClaim{} }
func (*ResourceClaim) ProtoMessage() {}
func (*ResourceClaim) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{174}
+ return fileDescriptor_6c07b07c062484ab, []int{176}
}
func (m *ResourceClaim) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4952,7 +5008,7 @@ var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo
func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} }
func (*ResourceFieldSelector) ProtoMessage() {}
func (*ResourceFieldSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{175}
+ return fileDescriptor_6c07b07c062484ab, []int{177}
}
func (m *ResourceFieldSelector) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4980,7 +5036,7 @@ var xxx_messageInfo_ResourceFieldSelector proto.InternalMessageInfo
func (m *ResourceHealth) Reset() { *m = ResourceHealth{} }
func (*ResourceHealth) ProtoMessage() {}
func (*ResourceHealth) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{176}
+ return fileDescriptor_6c07b07c062484ab, []int{178}
}
func (m *ResourceHealth) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5008,7 +5064,7 @@ var xxx_messageInfo_ResourceHealth proto.InternalMessageInfo
func (m *ResourceQuota) Reset() { *m = ResourceQuota{} }
func (*ResourceQuota) ProtoMessage() {}
func (*ResourceQuota) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{177}
+ return fileDescriptor_6c07b07c062484ab, []int{179}
}
func (m *ResourceQuota) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5036,7 +5092,7 @@ var xxx_messageInfo_ResourceQuota proto.InternalMessageInfo
func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} }
func (*ResourceQuotaList) ProtoMessage() {}
func (*ResourceQuotaList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{178}
+ return fileDescriptor_6c07b07c062484ab, []int{180}
}
func (m *ResourceQuotaList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5064,7 +5120,7 @@ var xxx_messageInfo_ResourceQuotaList proto.InternalMessageInfo
func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} }
func (*ResourceQuotaSpec) ProtoMessage() {}
func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{179}
+ return fileDescriptor_6c07b07c062484ab, []int{181}
}
func (m *ResourceQuotaSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5092,7 +5148,7 @@ var xxx_messageInfo_ResourceQuotaSpec proto.InternalMessageInfo
func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} }
func (*ResourceQuotaStatus) ProtoMessage() {}
func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{180}
+ return fileDescriptor_6c07b07c062484ab, []int{182}
}
func (m *ResourceQuotaStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5120,7 +5176,7 @@ var xxx_messageInfo_ResourceQuotaStatus proto.InternalMessageInfo
func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} }
func (*ResourceRequirements) ProtoMessage() {}
func (*ResourceRequirements) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{181}
+ return fileDescriptor_6c07b07c062484ab, []int{183}
}
func (m *ResourceRequirements) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5148,7 +5204,7 @@ var xxx_messageInfo_ResourceRequirements proto.InternalMessageInfo
func (m *ResourceStatus) Reset() { *m = ResourceStatus{} }
func (*ResourceStatus) ProtoMessage() {}
func (*ResourceStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{182}
+ return fileDescriptor_6c07b07c062484ab, []int{184}
}
func (m *ResourceStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5176,7 +5232,7 @@ var xxx_messageInfo_ResourceStatus proto.InternalMessageInfo
func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} }
func (*SELinuxOptions) ProtoMessage() {}
func (*SELinuxOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{183}
+ return fileDescriptor_6c07b07c062484ab, []int{185}
}
func (m *SELinuxOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5204,7 +5260,7 @@ var xxx_messageInfo_SELinuxOptions proto.InternalMessageInfo
func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} }
func (*ScaleIOPersistentVolumeSource) ProtoMessage() {}
func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{184}
+ return fileDescriptor_6c07b07c062484ab, []int{186}
}
func (m *ScaleIOPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5232,7 +5288,7 @@ var xxx_messageInfo_ScaleIOPersistentVolumeSource proto.InternalMessageInfo
func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} }
func (*ScaleIOVolumeSource) ProtoMessage() {}
func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{185}
+ return fileDescriptor_6c07b07c062484ab, []int{187}
}
func (m *ScaleIOVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5260,7 +5316,7 @@ var xxx_messageInfo_ScaleIOVolumeSource proto.InternalMessageInfo
func (m *ScopeSelector) Reset() { *m = ScopeSelector{} }
func (*ScopeSelector) ProtoMessage() {}
func (*ScopeSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{186}
+ return fileDescriptor_6c07b07c062484ab, []int{188}
}
func (m *ScopeSelector) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5288,7 +5344,7 @@ var xxx_messageInfo_ScopeSelector proto.InternalMessageInfo
func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} }
func (*ScopedResourceSelectorRequirement) ProtoMessage() {}
func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{187}
+ return fileDescriptor_6c07b07c062484ab, []int{189}
}
func (m *ScopedResourceSelectorRequirement) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5316,7 +5372,7 @@ var xxx_messageInfo_ScopedResourceSelectorRequirement proto.InternalMessageInfo
func (m *SeccompProfile) Reset() { *m = SeccompProfile{} }
func (*SeccompProfile) ProtoMessage() {}
func (*SeccompProfile) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{188}
+ return fileDescriptor_6c07b07c062484ab, []int{190}
}
func (m *SeccompProfile) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5344,7 +5400,7 @@ var xxx_messageInfo_SeccompProfile proto.InternalMessageInfo
func (m *Secret) Reset() { *m = Secret{} }
func (*Secret) ProtoMessage() {}
func (*Secret) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{189}
+ return fileDescriptor_6c07b07c062484ab, []int{191}
}
func (m *Secret) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5372,7 +5428,7 @@ var xxx_messageInfo_Secret proto.InternalMessageInfo
func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} }
func (*SecretEnvSource) ProtoMessage() {}
func (*SecretEnvSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{190}
+ return fileDescriptor_6c07b07c062484ab, []int{192}
}
func (m *SecretEnvSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5400,7 +5456,7 @@ var xxx_messageInfo_SecretEnvSource proto.InternalMessageInfo
func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} }
func (*SecretKeySelector) ProtoMessage() {}
func (*SecretKeySelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{191}
+ return fileDescriptor_6c07b07c062484ab, []int{193}
}
func (m *SecretKeySelector) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5428,7 +5484,7 @@ var xxx_messageInfo_SecretKeySelector proto.InternalMessageInfo
func (m *SecretList) Reset() { *m = SecretList{} }
func (*SecretList) ProtoMessage() {}
func (*SecretList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{192}
+ return fileDescriptor_6c07b07c062484ab, []int{194}
}
func (m *SecretList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5456,7 +5512,7 @@ var xxx_messageInfo_SecretList proto.InternalMessageInfo
func (m *SecretProjection) Reset() { *m = SecretProjection{} }
func (*SecretProjection) ProtoMessage() {}
func (*SecretProjection) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{193}
+ return fileDescriptor_6c07b07c062484ab, []int{195}
}
func (m *SecretProjection) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5484,7 +5540,7 @@ var xxx_messageInfo_SecretProjection proto.InternalMessageInfo
func (m *SecretReference) Reset() { *m = SecretReference{} }
func (*SecretReference) ProtoMessage() {}
func (*SecretReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{194}
+ return fileDescriptor_6c07b07c062484ab, []int{196}
}
func (m *SecretReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5512,7 +5568,7 @@ var xxx_messageInfo_SecretReference proto.InternalMessageInfo
func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} }
func (*SecretVolumeSource) ProtoMessage() {}
func (*SecretVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{195}
+ return fileDescriptor_6c07b07c062484ab, []int{197}
}
func (m *SecretVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5540,7 +5596,7 @@ var xxx_messageInfo_SecretVolumeSource proto.InternalMessageInfo
func (m *SecurityContext) Reset() { *m = SecurityContext{} }
func (*SecurityContext) ProtoMessage() {}
func (*SecurityContext) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{196}
+ return fileDescriptor_6c07b07c062484ab, []int{198}
}
func (m *SecurityContext) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5568,7 +5624,7 @@ var xxx_messageInfo_SecurityContext proto.InternalMessageInfo
func (m *SerializedReference) Reset() { *m = SerializedReference{} }
func (*SerializedReference) ProtoMessage() {}
func (*SerializedReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{197}
+ return fileDescriptor_6c07b07c062484ab, []int{199}
}
func (m *SerializedReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5596,7 +5652,7 @@ var xxx_messageInfo_SerializedReference proto.InternalMessageInfo
func (m *Service) Reset() { *m = Service{} }
func (*Service) ProtoMessage() {}
func (*Service) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{198}
+ return fileDescriptor_6c07b07c062484ab, []int{200}
}
func (m *Service) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5624,7 +5680,7 @@ var xxx_messageInfo_Service proto.InternalMessageInfo
func (m *ServiceAccount) Reset() { *m = ServiceAccount{} }
func (*ServiceAccount) ProtoMessage() {}
func (*ServiceAccount) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{199}
+ return fileDescriptor_6c07b07c062484ab, []int{201}
}
func (m *ServiceAccount) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5652,7 +5708,7 @@ var xxx_messageInfo_ServiceAccount proto.InternalMessageInfo
func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} }
func (*ServiceAccountList) ProtoMessage() {}
func (*ServiceAccountList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{200}
+ return fileDescriptor_6c07b07c062484ab, []int{202}
}
func (m *ServiceAccountList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5680,7 +5736,7 @@ var xxx_messageInfo_ServiceAccountList proto.InternalMessageInfo
func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} }
func (*ServiceAccountTokenProjection) ProtoMessage() {}
func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{201}
+ return fileDescriptor_6c07b07c062484ab, []int{203}
}
func (m *ServiceAccountTokenProjection) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5708,7 +5764,7 @@ var xxx_messageInfo_ServiceAccountTokenProjection proto.InternalMessageInfo
func (m *ServiceList) Reset() { *m = ServiceList{} }
func (*ServiceList) ProtoMessage() {}
func (*ServiceList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{202}
+ return fileDescriptor_6c07b07c062484ab, []int{204}
}
func (m *ServiceList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5736,7 +5792,7 @@ var xxx_messageInfo_ServiceList proto.InternalMessageInfo
func (m *ServicePort) Reset() { *m = ServicePort{} }
func (*ServicePort) ProtoMessage() {}
func (*ServicePort) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{203}
+ return fileDescriptor_6c07b07c062484ab, []int{205}
}
func (m *ServicePort) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5764,7 +5820,7 @@ var xxx_messageInfo_ServicePort proto.InternalMessageInfo
func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} }
func (*ServiceProxyOptions) ProtoMessage() {}
func (*ServiceProxyOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{204}
+ return fileDescriptor_6c07b07c062484ab, []int{206}
}
func (m *ServiceProxyOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5792,7 +5848,7 @@ var xxx_messageInfo_ServiceProxyOptions proto.InternalMessageInfo
func (m *ServiceSpec) Reset() { *m = ServiceSpec{} }
func (*ServiceSpec) ProtoMessage() {}
func (*ServiceSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{205}
+ return fileDescriptor_6c07b07c062484ab, []int{207}
}
func (m *ServiceSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5820,7 +5876,7 @@ var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo
func (m *ServiceStatus) Reset() { *m = ServiceStatus{} }
func (*ServiceStatus) ProtoMessage() {}
func (*ServiceStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{206}
+ return fileDescriptor_6c07b07c062484ab, []int{208}
}
func (m *ServiceStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5848,7 +5904,7 @@ var xxx_messageInfo_ServiceStatus proto.InternalMessageInfo
func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} }
func (*SessionAffinityConfig) ProtoMessage() {}
func (*SessionAffinityConfig) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{207}
+ return fileDescriptor_6c07b07c062484ab, []int{209}
}
func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5876,7 +5932,7 @@ var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo
func (m *SleepAction) Reset() { *m = SleepAction{} }
func (*SleepAction) ProtoMessage() {}
func (*SleepAction) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{208}
+ return fileDescriptor_6c07b07c062484ab, []int{210}
}
func (m *SleepAction) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5904,7 +5960,7 @@ var xxx_messageInfo_SleepAction proto.InternalMessageInfo
func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} }
func (*StorageOSPersistentVolumeSource) ProtoMessage() {}
func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{209}
+ return fileDescriptor_6c07b07c062484ab, []int{211}
}
func (m *StorageOSPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5932,7 +5988,7 @@ var xxx_messageInfo_StorageOSPersistentVolumeSource proto.InternalMessageInfo
func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} }
func (*StorageOSVolumeSource) ProtoMessage() {}
func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{210}
+ return fileDescriptor_6c07b07c062484ab, []int{212}
}
func (m *StorageOSVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5960,7 +6016,7 @@ var xxx_messageInfo_StorageOSVolumeSource proto.InternalMessageInfo
func (m *Sysctl) Reset() { *m = Sysctl{} }
func (*Sysctl) ProtoMessage() {}
func (*Sysctl) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{211}
+ return fileDescriptor_6c07b07c062484ab, []int{213}
}
func (m *Sysctl) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5988,7 +6044,7 @@ var xxx_messageInfo_Sysctl proto.InternalMessageInfo
func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} }
func (*TCPSocketAction) ProtoMessage() {}
func (*TCPSocketAction) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{212}
+ return fileDescriptor_6c07b07c062484ab, []int{214}
}
func (m *TCPSocketAction) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6016,7 +6072,7 @@ var xxx_messageInfo_TCPSocketAction proto.InternalMessageInfo
func (m *Taint) Reset() { *m = Taint{} }
func (*Taint) ProtoMessage() {}
func (*Taint) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{213}
+ return fileDescriptor_6c07b07c062484ab, []int{215}
}
func (m *Taint) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6044,7 +6100,7 @@ var xxx_messageInfo_Taint proto.InternalMessageInfo
func (m *Toleration) Reset() { *m = Toleration{} }
func (*Toleration) ProtoMessage() {}
func (*Toleration) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{214}
+ return fileDescriptor_6c07b07c062484ab, []int{216}
}
func (m *Toleration) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6072,7 +6128,7 @@ var xxx_messageInfo_Toleration proto.InternalMessageInfo
func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} }
func (*TopologySelectorLabelRequirement) ProtoMessage() {}
func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{215}
+ return fileDescriptor_6c07b07c062484ab, []int{217}
}
func (m *TopologySelectorLabelRequirement) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6100,7 +6156,7 @@ var xxx_messageInfo_TopologySelectorLabelRequirement proto.InternalMessageInfo
func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} }
func (*TopologySelectorTerm) ProtoMessage() {}
func (*TopologySelectorTerm) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{216}
+ return fileDescriptor_6c07b07c062484ab, []int{218}
}
func (m *TopologySelectorTerm) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6128,7 +6184,7 @@ var xxx_messageInfo_TopologySelectorTerm proto.InternalMessageInfo
func (m *TopologySpreadConstraint) Reset() { *m = TopologySpreadConstraint{} }
func (*TopologySpreadConstraint) ProtoMessage() {}
func (*TopologySpreadConstraint) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{217}
+ return fileDescriptor_6c07b07c062484ab, []int{219}
}
func (m *TopologySpreadConstraint) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6156,7 +6212,7 @@ var xxx_messageInfo_TopologySpreadConstraint proto.InternalMessageInfo
func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} }
func (*TypedLocalObjectReference) ProtoMessage() {}
func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{218}
+ return fileDescriptor_6c07b07c062484ab, []int{220}
}
func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6184,7 +6240,7 @@ var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo
func (m *TypedObjectReference) Reset() { *m = TypedObjectReference{} }
func (*TypedObjectReference) ProtoMessage() {}
func (*TypedObjectReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{219}
+ return fileDescriptor_6c07b07c062484ab, []int{221}
}
func (m *TypedObjectReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6212,7 +6268,7 @@ var xxx_messageInfo_TypedObjectReference proto.InternalMessageInfo
func (m *Volume) Reset() { *m = Volume{} }
func (*Volume) ProtoMessage() {}
func (*Volume) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{220}
+ return fileDescriptor_6c07b07c062484ab, []int{222}
}
func (m *Volume) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6240,7 +6296,7 @@ var xxx_messageInfo_Volume proto.InternalMessageInfo
func (m *VolumeDevice) Reset() { *m = VolumeDevice{} }
func (*VolumeDevice) ProtoMessage() {}
func (*VolumeDevice) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{221}
+ return fileDescriptor_6c07b07c062484ab, []int{223}
}
func (m *VolumeDevice) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6268,7 +6324,7 @@ var xxx_messageInfo_VolumeDevice proto.InternalMessageInfo
func (m *VolumeMount) Reset() { *m = VolumeMount{} }
func (*VolumeMount) ProtoMessage() {}
func (*VolumeMount) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{222}
+ return fileDescriptor_6c07b07c062484ab, []int{224}
}
func (m *VolumeMount) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6296,7 +6352,7 @@ var xxx_messageInfo_VolumeMount proto.InternalMessageInfo
func (m *VolumeMountStatus) Reset() { *m = VolumeMountStatus{} }
func (*VolumeMountStatus) ProtoMessage() {}
func (*VolumeMountStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{223}
+ return fileDescriptor_6c07b07c062484ab, []int{225}
}
func (m *VolumeMountStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6324,7 +6380,7 @@ var xxx_messageInfo_VolumeMountStatus proto.InternalMessageInfo
func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} }
func (*VolumeNodeAffinity) ProtoMessage() {}
func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{224}
+ return fileDescriptor_6c07b07c062484ab, []int{226}
}
func (m *VolumeNodeAffinity) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6352,7 +6408,7 @@ var xxx_messageInfo_VolumeNodeAffinity proto.InternalMessageInfo
func (m *VolumeProjection) Reset() { *m = VolumeProjection{} }
func (*VolumeProjection) ProtoMessage() {}
func (*VolumeProjection) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{225}
+ return fileDescriptor_6c07b07c062484ab, []int{227}
}
func (m *VolumeProjection) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6380,7 +6436,7 @@ var xxx_messageInfo_VolumeProjection proto.InternalMessageInfo
func (m *VolumeResourceRequirements) Reset() { *m = VolumeResourceRequirements{} }
func (*VolumeResourceRequirements) ProtoMessage() {}
func (*VolumeResourceRequirements) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{226}
+ return fileDescriptor_6c07b07c062484ab, []int{228}
}
func (m *VolumeResourceRequirements) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6408,7 +6464,7 @@ var xxx_messageInfo_VolumeResourceRequirements proto.InternalMessageInfo
func (m *VolumeSource) Reset() { *m = VolumeSource{} }
func (*VolumeSource) ProtoMessage() {}
func (*VolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{227}
+ return fileDescriptor_6c07b07c062484ab, []int{229}
}
func (m *VolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6436,7 +6492,7 @@ var xxx_messageInfo_VolumeSource proto.InternalMessageInfo
func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} }
func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {}
func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{228}
+ return fileDescriptor_6c07b07c062484ab, []int{230}
}
func (m *VsphereVirtualDiskVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6464,7 +6520,7 @@ var xxx_messageInfo_VsphereVirtualDiskVolumeSource proto.InternalMessageInfo
func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} }
func (*WeightedPodAffinityTerm) ProtoMessage() {}
func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{229}
+ return fileDescriptor_6c07b07c062484ab, []int{231}
}
func (m *WeightedPodAffinityTerm) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6492,7 +6548,7 @@ var xxx_messageInfo_WeightedPodAffinityTerm proto.InternalMessageInfo
func (m *WindowsSecurityContextOptions) Reset() { *m = WindowsSecurityContextOptions{} }
func (*WindowsSecurityContextOptions) ProtoMessage() {}
func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{230}
+ return fileDescriptor_6c07b07c062484ab, []int{232}
}
func (m *WindowsSecurityContextOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6583,6 +6639,7 @@ func init() {
proto.RegisterType((*EventSource)(nil), "k8s.io.api.core.v1.EventSource")
proto.RegisterType((*ExecAction)(nil), "k8s.io.api.core.v1.ExecAction")
proto.RegisterType((*FCVolumeSource)(nil), "k8s.io.api.core.v1.FCVolumeSource")
+ proto.RegisterType((*FileKeySelector)(nil), "k8s.io.api.core.v1.FileKeySelector")
proto.RegisterType((*FlexPersistentVolumeSource)(nil), "k8s.io.api.core.v1.FlexPersistentVolumeSource")
proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.FlexPersistentVolumeSource.OptionsEntry")
proto.RegisterType((*FlexVolumeSource)(nil), "k8s.io.api.core.v1.FlexVolumeSource")
@@ -6671,6 +6728,7 @@ func init() {
proto.RegisterType((*PodAffinityTerm)(nil), "k8s.io.api.core.v1.PodAffinityTerm")
proto.RegisterType((*PodAntiAffinity)(nil), "k8s.io.api.core.v1.PodAntiAffinity")
proto.RegisterType((*PodAttachOptions)(nil), "k8s.io.api.core.v1.PodAttachOptions")
+ proto.RegisterType((*PodCertificateProjection)(nil), "k8s.io.api.core.v1.PodCertificateProjection")
proto.RegisterType((*PodCondition)(nil), "k8s.io.api.core.v1.PodCondition")
proto.RegisterType((*PodDNSConfig)(nil), "k8s.io.api.core.v1.PodDNSConfig")
proto.RegisterType((*PodDNSConfigOption)(nil), "k8s.io.api.core.v1.PodDNSConfigOption")
@@ -6787,1020 +6845,1033 @@ func init() {
}
var fileDescriptor_6c07b07c062484ab = []byte{
- // 16206 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x69, 0x90, 0x1c, 0xc9,
- 0x75, 0x30, 0xc6, 0xea, 0x9e, 0xf3, 0xcd, 0x9d, 0xb8, 0x06, 0xb3, 0x00, 0x1a, 0x5b, 0xbb, 0x8b,
- 0xc5, 0x5e, 0x03, 0x62, 0x0f, 0x2e, 0xb8, 0xbb, 0x5c, 0xed, 0x9c, 0x40, 0x2f, 0x30, 0x83, 0xde,
- 0xec, 0x01, 0x40, 0x2e, 0x97, 0x14, 0x0b, 0xdd, 0x39, 0x33, 0xc5, 0xe9, 0xae, 0xea, 0xad, 0xaa,
- 0x1e, 0x60, 0x60, 0x2a, 0x24, 0x51, 0x16, 0x25, 0x52, 0x72, 0x04, 0x43, 0x21, 0x59, 0x0e, 0x4a,
- 0xa1, 0x1f, 0xba, 0x65, 0x5a, 0xb2, 0x68, 0xc9, 0x92, 0x2c, 0xea, 0xb2, 0x2d, 0x47, 0xc8, 0xfe,
- 0x21, 0x4b, 0x8a, 0x30, 0xa9, 0xb0, 0xc2, 0x23, 0x73, 0x6c, 0x87, 0x42, 0x3f, 0x2c, 0x29, 0x64,
- 0xff, 0xb0, 0x27, 0xf4, 0x7d, 0xfc, 0x22, 0xcf, 0xca, 0xac, 0xa3, 0xbb, 0x07, 0x0b, 0x0c, 0x97,
- 0x8c, 0xfd, 0xd7, 0x9d, 0xef, 0xe5, 0xcb, 0xac, 0x3c, 0x5f, 0xbe, 0x13, 0xec, 0xad, 0x4b, 0xe1,
- 0xac, 0xeb, 0x5f, 0x70, 0x5a, 0xee, 0x85, 0x9a, 0x1f, 0x90, 0x0b, 0xdb, 0x17, 0x2f, 0x6c, 0x10,
- 0x8f, 0x04, 0x4e, 0x44, 0xea, 0xb3, 0xad, 0xc0, 0x8f, 0x7c, 0x84, 0x38, 0xce, 0xac, 0xd3, 0x72,
- 0x67, 0x29, 0xce, 0xec, 0xf6, 0xc5, 0x99, 0xe7, 0x36, 0xdc, 0x68, 0xb3, 0x7d, 0x7b, 0xb6, 0xe6,
- 0x37, 0x2f, 0x6c, 0xf8, 0x1b, 0xfe, 0x05, 0x86, 0x7a, 0xbb, 0xbd, 0xce, 0xfe, 0xb1, 0x3f, 0xec,
- 0x17, 0x27, 0x31, 0xf3, 0x62, 0xdc, 0x4c, 0xd3, 0xa9, 0x6d, 0xba, 0x1e, 0x09, 0x76, 0x2e, 0xb4,
- 0xb6, 0x36, 0x58, 0xbb, 0x01, 0x09, 0xfd, 0x76, 0x50, 0x23, 0xc9, 0x86, 0x3b, 0xd6, 0x0a, 0x2f,
- 0x34, 0x49, 0xe4, 0x64, 0x74, 0x77, 0xe6, 0x42, 0x5e, 0xad, 0xa0, 0xed, 0x45, 0x6e, 0x33, 0xdd,
- 0xcc, 0x47, 0xba, 0x55, 0x08, 0x6b, 0x9b, 0xa4, 0xe9, 0xa4, 0xea, 0xbd, 0x90, 0x57, 0xaf, 0x1d,
- 0xb9, 0x8d, 0x0b, 0xae, 0x17, 0x85, 0x51, 0x90, 0xac, 0x64, 0x7f, 0xd3, 0x82, 0xb3, 0x73, 0xb7,
- 0xaa, 0x4b, 0x0d, 0x27, 0x8c, 0xdc, 0xda, 0x7c, 0xc3, 0xaf, 0x6d, 0x55, 0x23, 0x3f, 0x20, 0x37,
- 0xfd, 0x46, 0xbb, 0x49, 0xaa, 0x6c, 0x20, 0xd0, 0xb3, 0x30, 0xb4, 0xcd, 0xfe, 0x97, 0x17, 0xa7,
- 0xad, 0xb3, 0xd6, 0xf9, 0xe1, 0xf9, 0xc9, 0xbf, 0xd8, 0x2d, 0x7d, 0x68, 0x6f, 0xb7, 0x34, 0x74,
- 0x53, 0x94, 0x63, 0x85, 0x81, 0xce, 0xc1, 0xc0, 0x7a, 0xb8, 0xb6, 0xd3, 0x22, 0xd3, 0x05, 0x86,
- 0x3b, 0x2e, 0x70, 0x07, 0x96, 0xab, 0xb4, 0x14, 0x0b, 0x28, 0xba, 0x00, 0xc3, 0x2d, 0x27, 0x88,
- 0xdc, 0xc8, 0xf5, 0xbd, 0xe9, 0xe2, 0x59, 0xeb, 0x7c, 0xff, 0xfc, 0x94, 0x40, 0x1d, 0xae, 0x48,
- 0x00, 0x8e, 0x71, 0x68, 0x37, 0x02, 0xe2, 0xd4, 0xaf, 0x7b, 0x8d, 0x9d, 0xe9, 0xbe, 0xb3, 0xd6,
- 0xf9, 0xa1, 0xb8, 0x1b, 0x58, 0x94, 0x63, 0x85, 0x61, 0x7f, 0xa5, 0x00, 0x43, 0x73, 0xeb, 0xeb,
- 0xae, 0xe7, 0x46, 0x3b, 0xe8, 0x26, 0x8c, 0x7a, 0x7e, 0x9d, 0xc8, 0xff, 0xec, 0x2b, 0x46, 0x9e,
- 0x3f, 0x3b, 0x9b, 0x5e, 0x4a, 0xb3, 0xab, 0x1a, 0xde, 0xfc, 0xe4, 0xde, 0x6e, 0x69, 0x54, 0x2f,
- 0xc1, 0x06, 0x1d, 0x84, 0x61, 0xa4, 0xe5, 0xd7, 0x15, 0xd9, 0x02, 0x23, 0x5b, 0xca, 0x22, 0x5b,
- 0x89, 0xd1, 0xe6, 0x27, 0xf6, 0x76, 0x4b, 0x23, 0x5a, 0x01, 0xd6, 0x89, 0xa0, 0xdb, 0x30, 0x41,
- 0xff, 0x7a, 0x91, 0xab, 0xe8, 0x16, 0x19, 0xdd, 0xc7, 0xf2, 0xe8, 0x6a, 0xa8, 0xf3, 0x47, 0xf6,
- 0x76, 0x4b, 0x13, 0x89, 0x42, 0x9c, 0x24, 0x68, 0xff, 0xa4, 0x05, 0x13, 0x73, 0xad, 0xd6, 0x5c,
- 0xd0, 0xf4, 0x83, 0x4a, 0xe0, 0xaf, 0xbb, 0x0d, 0x82, 0x5e, 0x86, 0xbe, 0x88, 0xce, 0x1a, 0x9f,
- 0xe1, 0xc7, 0xc4, 0xd0, 0xf6, 0xd1, 0xb9, 0xda, 0xdf, 0x2d, 0x1d, 0x49, 0xa0, 0xb3, 0xa9, 0x64,
- 0x15, 0xd0, 0x1b, 0x30, 0xd9, 0xf0, 0x6b, 0x4e, 0x63, 0xd3, 0x0f, 0x23, 0x01, 0x15, 0x53, 0x7f,
- 0x74, 0x6f, 0xb7, 0x34, 0x79, 0x2d, 0x01, 0xc3, 0x29, 0x6c, 0xfb, 0x1e, 0x8c, 0xcf, 0x45, 0x91,
- 0x53, 0xdb, 0x24, 0x75, 0xbe, 0xa0, 0xd0, 0x8b, 0xd0, 0xe7, 0x39, 0x4d, 0xd9, 0x99, 0xb3, 0xb2,
- 0x33, 0xab, 0x4e, 0x93, 0x76, 0x66, 0xf2, 0x86, 0xe7, 0xbe, 0xdb, 0x16, 0x8b, 0x94, 0x96, 0x61,
- 0x86, 0x8d, 0x9e, 0x07, 0xa8, 0x93, 0x6d, 0xb7, 0x46, 0x2a, 0x4e, 0xb4, 0x29, 0xfa, 0x80, 0x44,
- 0x5d, 0x58, 0x54, 0x10, 0xac, 0x61, 0xd9, 0x77, 0x61, 0x78, 0x6e, 0xdb, 0x77, 0xeb, 0x15, 0xbf,
- 0x1e, 0xa2, 0x2d, 0x98, 0x68, 0x05, 0x64, 0x9d, 0x04, 0xaa, 0x68, 0xda, 0x3a, 0x5b, 0x3c, 0x3f,
- 0xf2, 0xfc, 0xf9, 0xcc, 0xb1, 0x37, 0x51, 0x97, 0xbc, 0x28, 0xd8, 0x99, 0x3f, 0x21, 0xda, 0x9b,
- 0x48, 0x40, 0x71, 0x92, 0xb2, 0xfd, 0xe7, 0x05, 0x38, 0x36, 0x77, 0xaf, 0x1d, 0x90, 0x45, 0x37,
- 0xdc, 0x4a, 0x6e, 0xb8, 0xba, 0x1b, 0x6e, 0xad, 0xc6, 0x23, 0xa0, 0x56, 0xfa, 0xa2, 0x28, 0xc7,
- 0x0a, 0x03, 0x3d, 0x07, 0x83, 0xf4, 0xf7, 0x0d, 0x5c, 0x16, 0x9f, 0x7c, 0x44, 0x20, 0x8f, 0x2c,
- 0x3a, 0x91, 0xb3, 0xc8, 0x41, 0x58, 0xe2, 0xa0, 0x15, 0x18, 0xa9, 0xb1, 0xf3, 0x61, 0x63, 0xc5,
- 0xaf, 0x13, 0xb6, 0xb6, 0x86, 0xe7, 0x9f, 0xa1, 0xe8, 0x0b, 0x71, 0xf1, 0xfe, 0x6e, 0x69, 0x9a,
- 0xf7, 0x4d, 0x90, 0xd0, 0x60, 0x58, 0xaf, 0x8f, 0x6c, 0xb5, 0xdd, 0xfb, 0x18, 0x25, 0xc8, 0xd8,
- 0xea, 0xe7, 0xb5, 0x9d, 0xdb, 0xcf, 0x76, 0xee, 0x68, 0xf6, 0xae, 0x45, 0x17, 0xa1, 0x6f, 0xcb,
- 0xf5, 0xea, 0xd3, 0x03, 0x8c, 0xd6, 0x69, 0x3a, 0xe7, 0x57, 0x5d, 0xaf, 0xbe, 0xbf, 0x5b, 0x9a,
- 0x32, 0xba, 0x43, 0x0b, 0x31, 0x43, 0xb5, 0xff, 0x1f, 0x0b, 0x4a, 0x0c, 0xb6, 0xec, 0x36, 0x48,
- 0x85, 0x04, 0xa1, 0x1b, 0x46, 0xc4, 0x8b, 0x8c, 0x01, 0x7d, 0x1e, 0x20, 0x24, 0xb5, 0x80, 0x44,
- 0xda, 0x90, 0xaa, 0x85, 0x51, 0x55, 0x10, 0xac, 0x61, 0xd1, 0xf3, 0x29, 0xdc, 0x74, 0x02, 0xb6,
- 0xbe, 0xc4, 0xc0, 0xaa, 0xf3, 0xa9, 0x2a, 0x01, 0x38, 0xc6, 0x31, 0xce, 0xa7, 0x62, 0xb7, 0xf3,
- 0x09, 0x7d, 0x0c, 0x26, 0xe2, 0xc6, 0xc2, 0x96, 0x53, 0x93, 0x03, 0xc8, 0x76, 0x70, 0xd5, 0x04,
- 0xe1, 0x24, 0xae, 0xfd, 0x9f, 0x5b, 0x62, 0xf1, 0xd0, 0xaf, 0x7e, 0x9f, 0x7f, 0xab, 0xfd, 0x07,
- 0x16, 0x0c, 0xce, 0xbb, 0x5e, 0xdd, 0xf5, 0x36, 0xd0, 0x67, 0x60, 0x88, 0x5e, 0x95, 0x75, 0x27,
- 0x72, 0xc4, 0x31, 0xfc, 0x61, 0x6d, 0x6f, 0xa9, 0x9b, 0x6b, 0xb6, 0xb5, 0xb5, 0x41, 0x0b, 0xc2,
- 0x59, 0x8a, 0x4d, 0x77, 0xdb, 0xf5, 0xdb, 0x9f, 0x25, 0xb5, 0x68, 0x85, 0x44, 0x4e, 0xfc, 0x39,
- 0x71, 0x19, 0x56, 0x54, 0xd1, 0x55, 0x18, 0x88, 0x9c, 0x60, 0x83, 0x44, 0xe2, 0x3c, 0xce, 0x3c,
- 0x37, 0x79, 0x4d, 0x4c, 0x77, 0x24, 0xf1, 0x6a, 0x24, 0xbe, 0xa5, 0xd6, 0x58, 0x55, 0x2c, 0x48,
- 0xd8, 0xff, 0x6e, 0x10, 0x4e, 0x2e, 0x54, 0xcb, 0x39, 0xeb, 0xea, 0x1c, 0x0c, 0xd4, 0x03, 0x77,
- 0x9b, 0x04, 0x62, 0x9c, 0x15, 0x95, 0x45, 0x56, 0x8a, 0x05, 0x14, 0x5d, 0x82, 0x51, 0x7e, 0x3f,
- 0x5e, 0x71, 0xbc, 0x7a, 0x7c, 0x3c, 0x0a, 0xec, 0xd1, 0x9b, 0x1a, 0x0c, 0x1b, 0x98, 0x07, 0x5c,
- 0x54, 0xe7, 0x12, 0x9b, 0x31, 0xef, 0xee, 0xfd, 0xa2, 0x05, 0x93, 0xbc, 0x99, 0xb9, 0x28, 0x0a,
- 0xdc, 0xdb, 0xed, 0x88, 0x84, 0xd3, 0xfd, 0xec, 0xa4, 0x5b, 0xc8, 0x1a, 0xad, 0xdc, 0x11, 0x98,
- 0xbd, 0x99, 0xa0, 0xc2, 0x0f, 0xc1, 0x69, 0xd1, 0xee, 0x64, 0x12, 0x8c, 0x53, 0xcd, 0xa2, 0x1f,
- 0xb1, 0x60, 0xa6, 0xe6, 0x7b, 0x51, 0xe0, 0x37, 0x1a, 0x24, 0xa8, 0xb4, 0x6f, 0x37, 0xdc, 0x70,
- 0x93, 0xaf, 0x53, 0x4c, 0xd6, 0xd9, 0x49, 0x90, 0x33, 0x87, 0x0a, 0x49, 0xcc, 0xe1, 0x99, 0xbd,
- 0xdd, 0xd2, 0xcc, 0x42, 0x2e, 0x29, 0xdc, 0xa1, 0x19, 0xb4, 0x05, 0x88, 0xde, 0xec, 0xd5, 0xc8,
- 0xd9, 0x20, 0x71, 0xe3, 0x83, 0xbd, 0x37, 0x7e, 0x7c, 0x6f, 0xb7, 0x84, 0x56, 0x53, 0x24, 0x70,
- 0x06, 0x59, 0xf4, 0x2e, 0x1c, 0xa5, 0xa5, 0xa9, 0x6f, 0x1d, 0xea, 0xbd, 0xb9, 0xe9, 0xbd, 0xdd,
- 0xd2, 0xd1, 0xd5, 0x0c, 0x22, 0x38, 0x93, 0x34, 0xfa, 0x21, 0x0b, 0x4e, 0xc6, 0x9f, 0xbf, 0x74,
- 0xb7, 0xe5, 0x78, 0xf5, 0xb8, 0xe1, 0xe1, 0xde, 0x1b, 0xa6, 0x67, 0xf2, 0xc9, 0x85, 0x3c, 0x4a,
- 0x38, 0xbf, 0x11, 0xe4, 0xc1, 0x11, 0xda, 0xb5, 0x64, 0xdb, 0xd0, 0x7b, 0xdb, 0x27, 0xf6, 0x76,
- 0x4b, 0x47, 0x56, 0xd3, 0x34, 0x70, 0x16, 0xe1, 0x99, 0x05, 0x38, 0x96, 0xb9, 0x3a, 0xd1, 0x24,
- 0x14, 0xb7, 0x08, 0x67, 0x02, 0x87, 0x31, 0xfd, 0x89, 0x8e, 0x42, 0xff, 0xb6, 0xd3, 0x68, 0x8b,
- 0x8d, 0x89, 0xf9, 0x9f, 0x57, 0x0a, 0x97, 0x2c, 0xfb, 0x7f, 0x28, 0xc2, 0xc4, 0x42, 0xb5, 0x7c,
- 0x5f, 0xbb, 0x5e, 0xbf, 0xf6, 0x0a, 0x1d, 0xaf, 0xbd, 0xf8, 0x12, 0x2d, 0xe6, 0x5e, 0xa2, 0x3f,
- 0x98, 0xb1, 0x65, 0xfb, 0xd8, 0x96, 0xfd, 0x68, 0xce, 0x96, 0x7d, 0xc0, 0x1b, 0x75, 0x3b, 0x67,
- 0xd5, 0xf6, 0xb3, 0x09, 0xcc, 0xe4, 0x90, 0x18, 0xef, 0x97, 0x3c, 0x6a, 0x0f, 0xb8, 0x74, 0x1f,
- 0xcc, 0x3c, 0xd6, 0x60, 0x74, 0xc1, 0x69, 0x39, 0xb7, 0xdd, 0x86, 0x1b, 0xb9, 0x24, 0x44, 0x4f,
- 0x42, 0xd1, 0xa9, 0xd7, 0x19, 0x77, 0x37, 0x3c, 0x7f, 0x6c, 0x6f, 0xb7, 0x54, 0x9c, 0xab, 0x53,
- 0x36, 0x03, 0x14, 0xd6, 0x0e, 0xa6, 0x18, 0xe8, 0x69, 0xe8, 0xab, 0x07, 0x7e, 0x6b, 0xba, 0xc0,
- 0x30, 0xe9, 0x2e, 0xef, 0x5b, 0x0c, 0xfc, 0x56, 0x02, 0x95, 0xe1, 0xd8, 0x7f, 0x56, 0x80, 0x53,
- 0x0b, 0xa4, 0xb5, 0xb9, 0x5c, 0xcd, 0xb9, 0x2f, 0xce, 0xc3, 0x50, 0xd3, 0xf7, 0xdc, 0xc8, 0x0f,
- 0x42, 0xd1, 0x34, 0x5b, 0x11, 0x2b, 0xa2, 0x0c, 0x2b, 0x28, 0x3a, 0x0b, 0x7d, 0xad, 0x98, 0x89,
- 0x1d, 0x95, 0x0c, 0x30, 0x63, 0x5f, 0x19, 0x84, 0x62, 0xb4, 0x43, 0x12, 0x88, 0x15, 0xa3, 0x30,
- 0x6e, 0x84, 0x24, 0xc0, 0x0c, 0x12, 0x73, 0x02, 0x94, 0x47, 0x10, 0x37, 0x42, 0x82, 0x13, 0xa0,
- 0x10, 0xac, 0x61, 0xa1, 0x0a, 0x0c, 0x87, 0x89, 0x99, 0xed, 0x69, 0x6b, 0x8e, 0x31, 0x56, 0x41,
- 0xcd, 0x64, 0x4c, 0xc4, 0xb8, 0xc1, 0x06, 0xba, 0xb2, 0x0a, 0x5f, 0x2f, 0x00, 0xe2, 0x43, 0xf8,
- 0x5d, 0x36, 0x70, 0x37, 0xd2, 0x03, 0xd7, 0xfb, 0x96, 0x78, 0x50, 0xa3, 0xf7, 0xff, 0x5a, 0x70,
- 0x6a, 0xc1, 0xf5, 0xea, 0x24, 0xc8, 0x59, 0x80, 0x0f, 0xe7, 0x29, 0x7f, 0x30, 0x26, 0xc5, 0x58,
- 0x62, 0x7d, 0x0f, 0x60, 0x89, 0xd9, 0xff, 0x6c, 0x01, 0xe2, 0x9f, 0xfd, 0xbe, 0xfb, 0xd8, 0x1b,
- 0xe9, 0x8f, 0x7d, 0x00, 0xcb, 0xc2, 0xbe, 0x06, 0xe3, 0x0b, 0x0d, 0x97, 0x78, 0x51, 0xb9, 0xb2,
- 0xe0, 0x7b, 0xeb, 0xee, 0x06, 0x7a, 0x05, 0xc6, 0x23, 0xb7, 0x49, 0xfc, 0x76, 0x54, 0x25, 0x35,
- 0xdf, 0x63, 0x2f, 0x57, 0xeb, 0x7c, 0xff, 0x3c, 0xda, 0xdb, 0x2d, 0x8d, 0xaf, 0x19, 0x10, 0x9c,
- 0xc0, 0xb4, 0x7f, 0x95, 0x9e, 0x5b, 0x8d, 0x76, 0x18, 0x91, 0x60, 0x2d, 0x68, 0x87, 0xd1, 0x7c,
- 0x9b, 0xf2, 0x9e, 0x95, 0xc0, 0xa7, 0xdd, 0x71, 0x7d, 0x0f, 0x9d, 0x32, 0x9e, 0xe3, 0x43, 0xf2,
- 0x29, 0x2e, 0x9e, 0xdd, 0xb3, 0x00, 0xa1, 0xbb, 0xe1, 0x91, 0x40, 0x7b, 0x3e, 0x8c, 0xb3, 0xad,
- 0xa2, 0x4a, 0xb1, 0x86, 0x81, 0x1a, 0x30, 0xd6, 0x70, 0x6e, 0x93, 0x46, 0x95, 0x34, 0x48, 0x2d,
- 0xf2, 0x03, 0x21, 0xdf, 0x78, 0xa1, 0xb7, 0x77, 0xc0, 0x35, 0xbd, 0xea, 0xfc, 0xd4, 0xde, 0x6e,
- 0x69, 0xcc, 0x28, 0xc2, 0x26, 0x71, 0x7a, 0x74, 0xf8, 0x2d, 0xfa, 0x15, 0x4e, 0x43, 0x7f, 0x7c,
- 0x5e, 0x17, 0x65, 0x58, 0x41, 0xd5, 0xd1, 0xd1, 0x97, 0x77, 0x74, 0xd8, 0x7f, 0x47, 0x17, 0x9a,
- 0xdf, 0x6c, 0xf9, 0x1e, 0xf1, 0xa2, 0x05, 0xdf, 0xab, 0x73, 0xc9, 0xd4, 0x2b, 0x86, 0xe8, 0xe4,
- 0x5c, 0x42, 0x74, 0x72, 0x3c, 0x5d, 0x43, 0x93, 0x9e, 0x7c, 0x14, 0x06, 0xc2, 0xc8, 0x89, 0xda,
- 0xa1, 0x18, 0xb8, 0x47, 0xe5, 0xb2, 0xab, 0xb2, 0xd2, 0xfd, 0xdd, 0xd2, 0x84, 0xaa, 0xc6, 0x8b,
- 0xb0, 0xa8, 0x80, 0x9e, 0x82, 0xc1, 0x26, 0x09, 0x43, 0x67, 0x43, 0xb2, 0x0d, 0x13, 0xa2, 0xee,
- 0xe0, 0x0a, 0x2f, 0xc6, 0x12, 0x8e, 0x1e, 0x83, 0x7e, 0x12, 0x04, 0x7e, 0x20, 0xbe, 0x6d, 0x4c,
- 0x20, 0xf6, 0x2f, 0xd1, 0x42, 0xcc, 0x61, 0xf6, 0xff, 0x6c, 0xc1, 0x84, 0xea, 0x2b, 0x6f, 0xeb,
- 0x10, 0x9e, 0x6b, 0x6f, 0x03, 0xd4, 0xe4, 0x07, 0x86, 0xec, 0x9a, 0x1d, 0x79, 0xfe, 0x5c, 0x26,
- 0x47, 0x93, 0x1a, 0xc6, 0x98, 0xb2, 0x2a, 0x0a, 0xb1, 0x46, 0xcd, 0xfe, 0x63, 0x0b, 0x8e, 0x24,
- 0xbe, 0xe8, 0x9a, 0x1b, 0x46, 0xe8, 0x9d, 0xd4, 0x57, 0xcd, 0xf6, 0xb8, 0xf8, 0xdc, 0x90, 0x7f,
- 0x93, 0xda, 0xf3, 0xb2, 0x44, 0xfb, 0xa2, 0x2b, 0xd0, 0xef, 0x46, 0xa4, 0x29, 0x3f, 0xe6, 0xb1,
- 0x8e, 0x1f, 0xc3, 0x7b, 0x15, 0xcf, 0x48, 0x99, 0xd6, 0xc4, 0x9c, 0x80, 0xfd, 0x67, 0x45, 0x18,
- 0xe6, 0xfb, 0x7b, 0xc5, 0x69, 0x1d, 0xc2, 0x5c, 0x3c, 0x03, 0xc3, 0x6e, 0xb3, 0xd9, 0x8e, 0x9c,
- 0xdb, 0xe2, 0xde, 0x1b, 0xe2, 0x67, 0x50, 0x59, 0x16, 0xe2, 0x18, 0x8e, 0xca, 0xd0, 0xc7, 0xba,
- 0xc2, 0xbf, 0xf2, 0xc9, 0xec, 0xaf, 0x14, 0x7d, 0x9f, 0x5d, 0x74, 0x22, 0x87, 0xb3, 0x9c, 0x6a,
- 0x5f, 0xd1, 0x22, 0xcc, 0x48, 0x20, 0x07, 0xe0, 0xb6, 0xeb, 0x39, 0xc1, 0x0e, 0x2d, 0x9b, 0x2e,
- 0x32, 0x82, 0xcf, 0x75, 0x26, 0x38, 0xaf, 0xf0, 0x39, 0x59, 0xf5, 0x61, 0x31, 0x00, 0x6b, 0x44,
- 0x67, 0x5e, 0x86, 0x61, 0x85, 0x7c, 0x10, 0xce, 0x71, 0xe6, 0x63, 0x30, 0x91, 0x68, 0xab, 0x5b,
- 0xf5, 0x51, 0x9d, 0xf1, 0xfc, 0x43, 0x76, 0x64, 0x88, 0x5e, 0x2f, 0x79, 0xdb, 0xe2, 0x6e, 0xba,
- 0x07, 0x47, 0x1b, 0x19, 0x47, 0xbe, 0x98, 0xd7, 0xde, 0xaf, 0x88, 0x53, 0xe2, 0xb3, 0x8f, 0x66,
- 0x41, 0x71, 0x66, 0x1b, 0xc6, 0x89, 0x58, 0xe8, 0x74, 0x22, 0xd2, 0xf3, 0xee, 0xa8, 0xea, 0xfc,
- 0x55, 0xb2, 0xa3, 0x0e, 0xd5, 0xef, 0x64, 0xf7, 0x4f, 0xf3, 0xd1, 0xe7, 0xc7, 0xe5, 0x88, 0x20,
- 0x50, 0xbc, 0x4a, 0x76, 0xf8, 0x54, 0xe8, 0x5f, 0x57, 0xec, 0xf8, 0x75, 0x5f, 0xb3, 0x60, 0x4c,
- 0x7d, 0xdd, 0x21, 0x9c, 0x0b, 0xf3, 0xe6, 0xb9, 0x70, 0xba, 0xe3, 0x02, 0xcf, 0x39, 0x11, 0xbe,
- 0x5e, 0x80, 0x93, 0x0a, 0x87, 0x3e, 0xa2, 0xf8, 0x1f, 0xb1, 0xaa, 0x2e, 0xc0, 0xb0, 0xa7, 0xc4,
- 0x89, 0x96, 0x29, 0xc7, 0x8b, 0x85, 0x89, 0x31, 0x0e, 0xbd, 0xf2, 0xbc, 0xf8, 0xd2, 0x1e, 0xd5,
- 0xe5, 0xec, 0xe2, 0x72, 0x9f, 0x87, 0x62, 0xdb, 0xad, 0x8b, 0x0b, 0xe6, 0xc3, 0x72, 0xb4, 0x6f,
- 0x94, 0x17, 0xf7, 0x77, 0x4b, 0x8f, 0xe6, 0xa9, 0x9c, 0xe8, 0xcd, 0x16, 0xce, 0xde, 0x28, 0x2f,
- 0x62, 0x5a, 0x19, 0xcd, 0xc1, 0x84, 0xd4, 0xaa, 0xdd, 0xa4, 0x7c, 0xa9, 0xef, 0x89, 0x7b, 0x48,
- 0x09, 0xcb, 0xb1, 0x09, 0xc6, 0x49, 0x7c, 0xb4, 0x08, 0x93, 0x5b, 0xed, 0xdb, 0xa4, 0x41, 0x22,
- 0xfe, 0xc1, 0x57, 0x09, 0x17, 0x25, 0x0f, 0xc7, 0x4f, 0xd8, 0xab, 0x09, 0x38, 0x4e, 0xd5, 0xb0,
- 0xbf, 0xcd, 0xee, 0x03, 0x31, 0x7a, 0x1a, 0x7f, 0xf3, 0x9d, 0x5c, 0xce, 0xbd, 0xac, 0x8a, 0xab,
- 0x64, 0x67, 0xcd, 0xa7, 0x7c, 0x48, 0xf6, 0xaa, 0x30, 0xd6, 0x7c, 0x5f, 0xc7, 0x35, 0xff, 0xbb,
- 0x05, 0x38, 0xa6, 0x46, 0xc0, 0xe0, 0x96, 0xbf, 0xdb, 0xc7, 0xe0, 0x22, 0x8c, 0xd4, 0xc9, 0xba,
- 0xd3, 0x6e, 0x44, 0x4a, 0xaf, 0xd1, 0xcf, 0x55, 0x6d, 0x8b, 0x71, 0x31, 0xd6, 0x71, 0x0e, 0x30,
- 0x6c, 0xbf, 0x39, 0xc6, 0x2e, 0xe2, 0xc8, 0xa1, 0x6b, 0x5c, 0xed, 0x1a, 0x2b, 0x77, 0xd7, 0x3c,
- 0x06, 0xfd, 0x6e, 0x93, 0x32, 0x66, 0x05, 0x93, 0xdf, 0x2a, 0xd3, 0x42, 0xcc, 0x61, 0xe8, 0x09,
- 0x18, 0xac, 0xf9, 0xcd, 0xa6, 0xe3, 0xd5, 0xd9, 0x95, 0x37, 0x3c, 0x3f, 0x42, 0x79, 0xb7, 0x05,
- 0x5e, 0x84, 0x25, 0x8c, 0x32, 0xdf, 0x4e, 0xb0, 0xc1, 0x85, 0x3d, 0x82, 0xf9, 0x9e, 0x0b, 0x36,
- 0x42, 0xcc, 0x4a, 0xe9, 0x5b, 0xf5, 0x8e, 0x1f, 0x6c, 0xb9, 0xde, 0xc6, 0xa2, 0x1b, 0x88, 0x2d,
- 0xa1, 0xee, 0xc2, 0x5b, 0x0a, 0x82, 0x35, 0x2c, 0xb4, 0x0c, 0xfd, 0x2d, 0x3f, 0x88, 0xc2, 0xe9,
- 0x01, 0x36, 0xdc, 0x8f, 0xe6, 0x1c, 0x44, 0xfc, 0x6b, 0x2b, 0x7e, 0x10, 0xc5, 0x1f, 0x40, 0xff,
- 0x85, 0x98, 0x57, 0x47, 0xd7, 0x60, 0x90, 0x78, 0xdb, 0xcb, 0x81, 0xdf, 0x9c, 0x3e, 0x92, 0x4f,
- 0x69, 0x89, 0xa3, 0xf0, 0x65, 0x16, 0xf3, 0xa8, 0xa2, 0x18, 0x4b, 0x12, 0xe8, 0xa3, 0x50, 0x24,
- 0xde, 0xf6, 0xf4, 0x20, 0xa3, 0x34, 0x93, 0x43, 0xe9, 0xa6, 0x13, 0xc4, 0x67, 0xfe, 0x92, 0xb7,
- 0x8d, 0x69, 0x1d, 0xf4, 0x09, 0x18, 0x96, 0x07, 0x46, 0x28, 0xa4, 0xa8, 0x99, 0x0b, 0x56, 0x1e,
- 0x33, 0x98, 0xbc, 0xdb, 0x76, 0x03, 0xd2, 0x24, 0x5e, 0x14, 0xc6, 0x27, 0xa4, 0x84, 0x86, 0x38,
- 0xa6, 0x86, 0x6a, 0x30, 0x1a, 0x90, 0xd0, 0xbd, 0x47, 0x2a, 0x7e, 0xc3, 0xad, 0xed, 0x4c, 0x9f,
- 0x60, 0xdd, 0x7b, 0xaa, 0xe3, 0x90, 0x61, 0xad, 0x42, 0x2c, 0xe5, 0xd7, 0x4b, 0xb1, 0x41, 0x14,
- 0xbd, 0x05, 0x63, 0x01, 0x09, 0x23, 0x27, 0x88, 0x44, 0x2b, 0xd3, 0x4a, 0x2b, 0x37, 0x86, 0x75,
- 0x00, 0x7f, 0x4e, 0xc4, 0xcd, 0xc4, 0x10, 0x6c, 0x52, 0x40, 0x9f, 0x90, 0x2a, 0x87, 0x15, 0xbf,
- 0xed, 0x45, 0xe1, 0xf4, 0x30, 0xeb, 0x77, 0xa6, 0x6e, 0xfa, 0x66, 0x8c, 0x97, 0xd4, 0x49, 0xf0,
- 0xca, 0xd8, 0x20, 0x85, 0x3e, 0x05, 0x63, 0xfc, 0x3f, 0x57, 0xa9, 0x86, 0xd3, 0xc7, 0x18, 0xed,
- 0xb3, 0xf9, 0xb4, 0x39, 0xe2, 0xfc, 0x31, 0x41, 0x7c, 0x4c, 0x2f, 0x0d, 0xb1, 0x49, 0x0d, 0x61,
- 0x18, 0x6b, 0xb8, 0xdb, 0xc4, 0x23, 0x61, 0x58, 0x09, 0xfc, 0xdb, 0x44, 0x48, 0x88, 0x4f, 0x66,
- 0xab, 0x60, 0xfd, 0xdb, 0x44, 0x3c, 0x02, 0xf5, 0x3a, 0xd8, 0x24, 0x81, 0x6e, 0xc0, 0x38, 0x7d,
- 0x92, 0xbb, 0x31, 0xd1, 0x91, 0x6e, 0x44, 0xd9, 0xc3, 0x19, 0x1b, 0x95, 0x70, 0x82, 0x08, 0xba,
- 0x0e, 0xa3, 0x6c, 0xcc, 0xdb, 0x2d, 0x4e, 0xf4, 0x78, 0x37, 0xa2, 0xcc, 0xa0, 0xa0, 0xaa, 0x55,
- 0xc1, 0x06, 0x01, 0xf4, 0x26, 0x0c, 0x37, 0xdc, 0x75, 0x52, 0xdb, 0xa9, 0x35, 0xc8, 0xf4, 0x28,
- 0xa3, 0x96, 0x79, 0x18, 0x5e, 0x93, 0x48, 0x9c, 0x3f, 0x57, 0x7f, 0x71, 0x5c, 0x1d, 0xdd, 0x84,
- 0xe3, 0x11, 0x09, 0x9a, 0xae, 0xe7, 0xd0, 0x43, 0x4c, 0x3c, 0x09, 0x99, 0x66, 0x7c, 0x8c, 0xad,
- 0xae, 0x33, 0x62, 0x36, 0x8e, 0xaf, 0x65, 0x62, 0xe1, 0x9c, 0xda, 0xe8, 0x2e, 0x4c, 0x67, 0x40,
- 0xf8, 0xba, 0x3d, 0xca, 0x28, 0xbf, 0x26, 0x28, 0x4f, 0xaf, 0xe5, 0xe0, 0xed, 0x77, 0x80, 0xe1,
- 0x5c, 0xea, 0xe8, 0x3a, 0x4c, 0xb0, 0x93, 0xb3, 0xd2, 0x6e, 0x34, 0x44, 0x83, 0xe3, 0xac, 0xc1,
- 0x27, 0x24, 0x1f, 0x51, 0x36, 0xc1, 0xfb, 0xbb, 0x25, 0x88, 0xff, 0xe1, 0x64, 0x6d, 0x74, 0x9b,
- 0x29, 0x61, 0xdb, 0x81, 0x1b, 0xed, 0xd0, 0x5d, 0x45, 0xee, 0x46, 0xd3, 0x13, 0x1d, 0x05, 0x52,
- 0x3a, 0xaa, 0xd2, 0xd4, 0xea, 0x85, 0x38, 0x49, 0x90, 0x5e, 0x05, 0x61, 0x54, 0x77, 0xbd, 0xe9,
- 0x49, 0xfe, 0x9e, 0x92, 0x27, 0x69, 0x95, 0x16, 0x62, 0x0e, 0x63, 0x0a, 0x58, 0xfa, 0xe3, 0x3a,
- 0xbd, 0x71, 0xa7, 0x18, 0x62, 0xac, 0x80, 0x95, 0x00, 0x1c, 0xe3, 0x50, 0x26, 0x38, 0x8a, 0x76,
- 0xa6, 0x11, 0x43, 0x55, 0x07, 0xe2, 0xda, 0xda, 0x27, 0x30, 0x2d, 0xb7, 0x6f, 0xc3, 0xb8, 0x3a,
- 0x26, 0xd8, 0x98, 0xa0, 0x12, 0xf4, 0x33, 0xb6, 0x4f, 0x88, 0x4f, 0x87, 0x69, 0x17, 0x18, 0x4b,
- 0x88, 0x79, 0x39, 0xeb, 0x82, 0x7b, 0x8f, 0xcc, 0xef, 0x44, 0x84, 0xcb, 0x22, 0x8a, 0x5a, 0x17,
- 0x24, 0x00, 0xc7, 0x38, 0xf6, 0xbf, 0xe7, 0xec, 0x73, 0x7c, 0x4b, 0xf4, 0x70, 0x2f, 0x3e, 0x0b,
- 0x43, 0xcc, 0xf0, 0xc3, 0x0f, 0xb8, 0x76, 0xb6, 0x3f, 0x66, 0x98, 0xaf, 0x88, 0x72, 0xac, 0x30,
- 0xd0, 0xab, 0x30, 0x56, 0xd3, 0x1b, 0x10, 0x97, 0xba, 0x3a, 0x46, 0x8c, 0xd6, 0xb1, 0x89, 0x8b,
- 0x2e, 0xc1, 0x10, 0xb3, 0x71, 0xaa, 0xf9, 0x0d, 0xc1, 0x6d, 0x4a, 0xce, 0x64, 0xa8, 0x22, 0xca,
- 0xf7, 0xb5, 0xdf, 0x58, 0x61, 0xa3, 0x73, 0x30, 0x40, 0xbb, 0x50, 0xae, 0x88, 0xeb, 0x54, 0x49,
- 0x02, 0xaf, 0xb0, 0x52, 0x2c, 0xa0, 0xf6, 0x1f, 0x5b, 0x8c, 0x97, 0x4a, 0x9f, 0xf9, 0xe8, 0x0a,
- 0xbb, 0x34, 0xd8, 0x0d, 0xa2, 0x69, 0xe1, 0x1f, 0xd7, 0x6e, 0x02, 0x05, 0xdb, 0x4f, 0xfc, 0xc7,
- 0x46, 0x4d, 0xf4, 0x76, 0xf2, 0x66, 0xe0, 0x0c, 0xc5, 0x8b, 0x72, 0x08, 0x92, 0xb7, 0xc3, 0x23,
- 0xf1, 0x15, 0x47, 0xfb, 0xd3, 0xe9, 0x8a, 0xb0, 0x7f, 0xaa, 0xa0, 0xad, 0x92, 0x6a, 0xe4, 0x44,
- 0x04, 0x55, 0x60, 0xf0, 0x8e, 0xe3, 0x46, 0xae, 0xb7, 0x21, 0xf8, 0xbe, 0xce, 0x17, 0x1d, 0xab,
- 0x74, 0x8b, 0x57, 0xe0, 0xdc, 0x8b, 0xf8, 0x83, 0x25, 0x19, 0x4a, 0x31, 0x68, 0x7b, 0x1e, 0xa5,
- 0x58, 0xe8, 0x95, 0x22, 0xe6, 0x15, 0x38, 0x45, 0xf1, 0x07, 0x4b, 0x32, 0xe8, 0x1d, 0x00, 0x79,
- 0x42, 0x90, 0xba, 0x90, 0x1d, 0x3e, 0xdb, 0x9d, 0xe8, 0x9a, 0xaa, 0xc3, 0x85, 0x93, 0xf1, 0x7f,
- 0xac, 0xd1, 0xb3, 0x23, 0x6d, 0x4e, 0xf5, 0xce, 0xa0, 0x4f, 0xd2, 0x2d, 0xea, 0x04, 0x11, 0xa9,
- 0xcf, 0x45, 0x62, 0x70, 0x9e, 0xee, 0xed, 0x71, 0xb8, 0xe6, 0x36, 0x89, 0xbe, 0x9d, 0x05, 0x11,
- 0x1c, 0xd3, 0xb3, 0x7f, 0xbf, 0x08, 0xd3, 0x79, 0xdd, 0xa5, 0x9b, 0x86, 0xdc, 0x75, 0xa3, 0x05,
- 0xca, 0xd6, 0x5a, 0xe6, 0xa6, 0x59, 0x12, 0xe5, 0x58, 0x61, 0xd0, 0xd5, 0x1b, 0xba, 0x1b, 0xf2,
- 0x6d, 0xdf, 0x1f, 0xaf, 0xde, 0x2a, 0x2b, 0xc5, 0x02, 0x4a, 0xf1, 0x02, 0xe2, 0x84, 0xc2, 0xf8,
- 0x4e, 0x5b, 0xe5, 0x98, 0x95, 0x62, 0x01, 0xd5, 0xa5, 0x8c, 0x7d, 0x5d, 0xa4, 0x8c, 0xc6, 0x10,
- 0xf5, 0x3f, 0xd8, 0x21, 0x42, 0x9f, 0x06, 0x58, 0x77, 0x3d, 0x37, 0xdc, 0x64, 0xd4, 0x07, 0x0e,
- 0x4c, 0x5d, 0x31, 0xc5, 0xcb, 0x8a, 0x0a, 0xd6, 0x28, 0xa2, 0x97, 0x60, 0x44, 0x1d, 0x20, 0xe5,
- 0x45, 0xa6, 0xfa, 0xd7, 0x4c, 0xa9, 0xe2, 0xd3, 0x74, 0x11, 0xeb, 0x78, 0xf6, 0x67, 0x93, 0xeb,
- 0x45, 0xec, 0x00, 0x6d, 0x7c, 0xad, 0x5e, 0xc7, 0xb7, 0xd0, 0x79, 0x7c, 0xed, 0xbf, 0x1e, 0x86,
- 0x09, 0xa3, 0xb1, 0x76, 0xd8, 0xc3, 0x99, 0x7b, 0x99, 0x5e, 0x40, 0x4e, 0x44, 0xc4, 0xfe, 0xb3,
- 0xbb, 0x6f, 0x15, 0xfd, 0x92, 0xa2, 0x3b, 0x80, 0xd7, 0x47, 0x9f, 0x86, 0xe1, 0x86, 0x13, 0x32,
- 0x89, 0x25, 0x11, 0xfb, 0xae, 0x17, 0x62, 0xf1, 0x83, 0xd0, 0x09, 0x23, 0xed, 0xd6, 0xe7, 0xb4,
- 0x63, 0x92, 0xf4, 0xa6, 0xa4, 0xfc, 0x95, 0xb4, 0xee, 0x54, 0x9d, 0xa0, 0x4c, 0xd8, 0x0e, 0xe6,
- 0x30, 0x74, 0x89, 0x1d, 0xad, 0x74, 0x55, 0x2c, 0x50, 0x6e, 0x94, 0x2d, 0xb3, 0x7e, 0x83, 0xc9,
- 0x56, 0x30, 0x6c, 0x60, 0xc6, 0x6f, 0xb2, 0x81, 0x0e, 0x6f, 0xb2, 0xa7, 0x60, 0x90, 0xfd, 0x50,
- 0x2b, 0x40, 0xcd, 0x46, 0x99, 0x17, 0x63, 0x09, 0x4f, 0x2e, 0x98, 0xa1, 0xde, 0x16, 0x0c, 0x7d,
- 0xf5, 0x89, 0x45, 0xcd, 0xcc, 0x2e, 0x86, 0xf8, 0x29, 0x27, 0x96, 0x3c, 0x96, 0x30, 0xf4, 0x6b,
- 0x16, 0x20, 0xa7, 0x41, 0x5f, 0xcb, 0xb4, 0x58, 0x3d, 0x6e, 0x80, 0xb1, 0xda, 0xaf, 0x76, 0x1d,
- 0xf6, 0x76, 0x38, 0x3b, 0x97, 0xaa, 0xcd, 0x25, 0xa5, 0xaf, 0x88, 0x2e, 0xa2, 0x34, 0x82, 0x7e,
- 0x19, 0x5d, 0x73, 0xc3, 0xe8, 0xf3, 0x7f, 0x9f, 0xb8, 0x9c, 0x32, 0xba, 0x84, 0x6e, 0xe8, 0x8f,
- 0xaf, 0x91, 0x03, 0x3e, 0xbe, 0xc6, 0x72, 0x1f, 0x5e, 0xdf, 0x9f, 0x78, 0xc0, 0x8c, 0xb2, 0x2f,
- 0x7f, 0xa2, 0xcb, 0x03, 0x46, 0x88, 0xd3, 0x7b, 0x79, 0xc6, 0x54, 0x84, 0x1e, 0x78, 0x8c, 0x75,
- 0xb9, 0xf3, 0x23, 0xf8, 0x46, 0x48, 0x82, 0xf9, 0x93, 0x52, 0x4d, 0xbc, 0xaf, 0xf3, 0x1e, 0x9a,
- 0xde, 0xf8, 0x87, 0x2c, 0x98, 0x4e, 0x0f, 0x10, 0xef, 0xd2, 0xf4, 0x38, 0xeb, 0xbf, 0xdd, 0x69,
- 0x64, 0x44, 0xe7, 0xa5, 0xb9, 0xeb, 0xf4, 0x5c, 0x0e, 0x2d, 0x9c, 0xdb, 0x0a, 0xba, 0x04, 0x10,
- 0x46, 0x7e, 0x8b, 0x9f, 0xf5, 0x8c, 0x99, 0x1d, 0x66, 0x06, 0x17, 0x50, 0x55, 0xa5, 0xfb, 0xf1,
- 0x5d, 0xa0, 0xe1, 0xce, 0xb4, 0xe1, 0x44, 0xce, 0x8a, 0xc9, 0x90, 0x77, 0x2f, 0xea, 0xf2, 0xee,
- 0x2e, 0x52, 0xd2, 0x59, 0x39, 0xa7, 0xb3, 0x6f, 0xb5, 0x1d, 0x2f, 0x72, 0xa3, 0x1d, 0x5d, 0x3e,
- 0xee, 0x81, 0x39, 0x94, 0xe8, 0x53, 0xd0, 0xdf, 0x70, 0xbd, 0xf6, 0x5d, 0x71, 0xc7, 0x9e, 0xcb,
- 0x7e, 0xfe, 0x78, 0xed, 0xbb, 0xe6, 0xe4, 0x94, 0xe8, 0x56, 0x66, 0xe5, 0xfb, 0xbb, 0x25, 0x94,
- 0x46, 0xc0, 0x9c, 0xaa, 0xfd, 0x34, 0x8c, 0x2f, 0x3a, 0xa4, 0xe9, 0x7b, 0x4b, 0x5e, 0xbd, 0xe5,
- 0xbb, 0x5e, 0x84, 0xa6, 0xa1, 0x8f, 0x31, 0x97, 0xfc, 0x6a, 0xed, 0xa3, 0x83, 0x8f, 0x59, 0x89,
- 0xbd, 0x01, 0xc7, 0x16, 0xfd, 0x3b, 0xde, 0x1d, 0x27, 0xa8, 0xcf, 0x55, 0xca, 0x9a, 0xbc, 0x70,
- 0x55, 0xca, 0xab, 0xac, 0x7c, 0x69, 0x80, 0x56, 0x93, 0x2f, 0xc2, 0x65, 0xb7, 0x41, 0x72, 0xa4,
- 0xba, 0x3f, 0x5b, 0x30, 0x5a, 0x8a, 0xf1, 0x95, 0x4e, 0xd2, 0xca, 0x35, 0x67, 0x78, 0x0b, 0x86,
- 0xd6, 0x5d, 0xd2, 0xa8, 0x63, 0xb2, 0x2e, 0x66, 0xe3, 0xc9, 0x7c, 0x83, 0xc7, 0x65, 0x8a, 0xa9,
- 0x94, 0xa7, 0x4c, 0xda, 0xb5, 0x2c, 0x2a, 0x63, 0x45, 0x06, 0x6d, 0xc1, 0xa4, 0x9c, 0x33, 0x09,
- 0x15, 0xe7, 0xfd, 0x53, 0x9d, 0x96, 0xaf, 0x49, 0x9c, 0x19, 0x7f, 0xe3, 0x04, 0x19, 0x9c, 0x22,
- 0x8c, 0x4e, 0x41, 0x5f, 0x93, 0x72, 0x36, 0x7d, 0x6c, 0xf8, 0x99, 0x78, 0x8b, 0x49, 0xea, 0x58,
- 0xa9, 0xfd, 0xf3, 0x16, 0x9c, 0x48, 0x8d, 0x8c, 0x90, 0x58, 0x3e, 0xe0, 0x59, 0x48, 0x4a, 0x10,
- 0x0b, 0xdd, 0x25, 0x88, 0xf6, 0x7f, 0x61, 0xc1, 0xd1, 0xa5, 0x66, 0x2b, 0xda, 0x59, 0x74, 0x4d,
- 0xdb, 0x83, 0x97, 0x61, 0xa0, 0x49, 0xea, 0x6e, 0xbb, 0x29, 0x66, 0xae, 0x24, 0x6f, 0xff, 0x15,
- 0x56, 0x4a, 0x4f, 0x90, 0x6a, 0xe4, 0x07, 0xce, 0x06, 0xe1, 0x05, 0x58, 0xa0, 0x33, 0x1e, 0xca,
- 0xbd, 0x47, 0xae, 0xb9, 0x4d, 0x37, 0xba, 0xbf, 0xdd, 0x25, 0xcc, 0x06, 0x24, 0x11, 0x1c, 0xd3,
- 0xb3, 0xbf, 0x69, 0xc1, 0x84, 0x5c, 0xf7, 0x73, 0xf5, 0x7a, 0x40, 0xc2, 0x10, 0xcd, 0x40, 0xc1,
- 0x6d, 0x89, 0x5e, 0x82, 0xe8, 0x65, 0xa1, 0x5c, 0xc1, 0x05, 0xb7, 0x25, 0x9f, 0x6b, 0x8c, 0xc1,
- 0x28, 0x9a, 0x16, 0x14, 0x57, 0x44, 0x39, 0x56, 0x18, 0xe8, 0x3c, 0x0c, 0x79, 0x7e, 0x9d, 0xbf,
- 0x78, 0x84, 0x0e, 0x9d, 0x62, 0xae, 0x8a, 0x32, 0xac, 0xa0, 0xa8, 0x02, 0xc3, 0xdc, 0xbe, 0x36,
- 0x5e, 0xb4, 0x3d, 0x59, 0xe9, 0xb2, 0x2f, 0x5b, 0x93, 0x35, 0x71, 0x4c, 0xc4, 0xfe, 0x53, 0x0b,
- 0x46, 0xe5, 0x97, 0xf5, 0xf8, 0x16, 0xa5, 0x5b, 0x2b, 0x7e, 0x87, 0xc6, 0x5b, 0x8b, 0xbe, 0x25,
- 0x19, 0xc4, 0x78, 0x42, 0x16, 0x0f, 0xf4, 0x84, 0xbc, 0x08, 0x23, 0x4e, 0xab, 0x55, 0x31, 0xdf,
- 0x9f, 0x6c, 0x29, 0xcd, 0xc5, 0xc5, 0x58, 0xc7, 0xb1, 0x7f, 0xae, 0x00, 0xe3, 0xf2, 0x0b, 0xaa,
- 0xed, 0xdb, 0x21, 0x89, 0xd0, 0x1a, 0x0c, 0x3b, 0x7c, 0x96, 0x88, 0x5c, 0xe4, 0x8f, 0x65, 0xcb,
- 0x45, 0x8d, 0x29, 0x8d, 0x19, 0xe9, 0x39, 0x59, 0x1b, 0xc7, 0x84, 0x50, 0x03, 0xa6, 0x3c, 0x3f,
- 0x62, 0x4c, 0x95, 0x82, 0x77, 0x52, 0x55, 0x27, 0xa9, 0x9f, 0x14, 0xd4, 0xa7, 0x56, 0x93, 0x54,
- 0x70, 0x9a, 0x30, 0x5a, 0x92, 0xb2, 0xe6, 0x62, 0xbe, 0x90, 0x50, 0x9f, 0xb8, 0x6c, 0x51, 0xb3,
- 0xfd, 0x47, 0x16, 0x0c, 0x4b, 0xb4, 0xc3, 0xb0, 0x4a, 0x58, 0x81, 0xc1, 0x90, 0x4d, 0x82, 0x1c,
- 0x1a, 0xbb, 0x53, 0xc7, 0xf9, 0x7c, 0xc5, 0xbc, 0x22, 0xff, 0x1f, 0x62, 0x49, 0x83, 0xa9, 0x1a,
- 0x55, 0xf7, 0xdf, 0x27, 0xaa, 0x46, 0xd5, 0x9f, 0x9c, 0x4b, 0xe9, 0x1f, 0x58, 0x9f, 0x35, 0xd9,
- 0x3d, 0x7d, 0xd2, 0xb4, 0x02, 0xb2, 0xee, 0xde, 0x4d, 0x3e, 0x69, 0x2a, 0xac, 0x14, 0x0b, 0x28,
- 0x7a, 0x07, 0x46, 0x6b, 0x52, 0xc7, 0x14, 0xef, 0xf0, 0x73, 0x1d, 0xf5, 0x9d, 0x4a, 0x35, 0xce,
- 0x65, 0xa4, 0x0b, 0x5a, 0x7d, 0x6c, 0x50, 0x33, 0xed, 0xc7, 0x8a, 0xdd, 0xec, 0xc7, 0x62, 0xba,
- 0xf9, 0xd6, 0x54, 0xbf, 0x60, 0xc1, 0x00, 0xd7, 0x2d, 0xf4, 0xa6, 0xda, 0xd1, 0x2c, 0x05, 0xe2,
- 0xb1, 0xbb, 0x49, 0x0b, 0x05, 0x67, 0x83, 0x56, 0x60, 0x98, 0xfd, 0x60, 0xba, 0x91, 0x62, 0xbe,
- 0xb7, 0x19, 0x6f, 0x55, 0xef, 0xe0, 0x4d, 0x59, 0x0d, 0xc7, 0x14, 0xec, 0x9f, 0x2e, 0xd2, 0xd3,
- 0x2d, 0x46, 0x35, 0x2e, 0x7d, 0xeb, 0xe1, 0x5d, 0xfa, 0x85, 0x87, 0x75, 0xe9, 0x6f, 0xc0, 0x44,
- 0x4d, 0xb3, 0x2b, 0x88, 0x67, 0xf2, 0x7c, 0xc7, 0x45, 0xa2, 0x99, 0x20, 0x70, 0xe9, 0xeb, 0x82,
- 0x49, 0x04, 0x27, 0xa9, 0xa2, 0x4f, 0xc2, 0x28, 0x9f, 0x67, 0xd1, 0x0a, 0x37, 0xc1, 0x7b, 0x22,
- 0x7f, 0xbd, 0xe8, 0x4d, 0x70, 0x69, 0xbd, 0x56, 0x1d, 0x1b, 0xc4, 0xec, 0x7f, 0xb1, 0x00, 0x2d,
- 0xb5, 0x36, 0x49, 0x93, 0x04, 0x4e, 0x23, 0x56, 0x0f, 0x7e, 0xc9, 0x82, 0x69, 0x92, 0x2a, 0x5e,
- 0xf0, 0x9b, 0x4d, 0x21, 0x0c, 0xc8, 0x91, 0x57, 0x2d, 0xe5, 0xd4, 0x89, 0x1f, 0x04, 0x79, 0x18,
- 0x38, 0xb7, 0x3d, 0xb4, 0x02, 0x47, 0xf8, 0x2d, 0xa9, 0x00, 0x9a, 0x95, 0xde, 0x23, 0x82, 0xf0,
- 0x91, 0xb5, 0x34, 0x0a, 0xce, 0xaa, 0x67, 0xff, 0xd1, 0x18, 0xe4, 0xf6, 0xe2, 0x03, 0xbd, 0xe8,
- 0x07, 0x7a, 0xd1, 0x0f, 0xf4, 0xa2, 0x1f, 0xe8, 0x45, 0x3f, 0xd0, 0x8b, 0x7e, 0xa0, 0x17, 0x7d,
- 0x9f, 0xea, 0x45, 0x7f, 0xc6, 0x82, 0x63, 0xea, 0xfa, 0x32, 0x1e, 0xec, 0x9f, 0x83, 0x23, 0x7c,
- 0xbb, 0x2d, 0x34, 0x1c, 0xb7, 0xb9, 0x46, 0x9a, 0xad, 0x86, 0x13, 0x49, 0xeb, 0xa7, 0x8b, 0x99,
- 0x2b, 0x37, 0xe1, 0x62, 0x61, 0x54, 0xe4, 0xbe, 0x6a, 0x19, 0x00, 0x9c, 0xd5, 0x8c, 0xfd, 0xfb,
- 0x43, 0xd0, 0xbf, 0xb4, 0x4d, 0xbc, 0xe8, 0x10, 0x9e, 0x36, 0x35, 0x18, 0x77, 0xbd, 0x6d, 0xbf,
- 0xb1, 0x4d, 0xea, 0x1c, 0x7e, 0x90, 0x17, 0xf8, 0x71, 0x41, 0x7a, 0xbc, 0x6c, 0x90, 0xc0, 0x09,
- 0x92, 0x0f, 0x43, 0xbb, 0x74, 0x19, 0x06, 0xf8, 0xe5, 0x23, 0x54, 0x4b, 0x99, 0x67, 0x36, 0x1b,
- 0x44, 0x71, 0xa5, 0xc6, 0x9a, 0x2f, 0x7e, 0xb9, 0x89, 0xea, 0xe8, 0xb3, 0x30, 0xbe, 0xee, 0x06,
- 0x61, 0xb4, 0xe6, 0x36, 0xe9, 0xd5, 0xd0, 0x6c, 0xdd, 0x87, 0x36, 0x49, 0x8d, 0xc3, 0xb2, 0x41,
- 0x09, 0x27, 0x28, 0xa3, 0x0d, 0x18, 0x6b, 0x38, 0x7a, 0x53, 0x83, 0x07, 0x6e, 0x4a, 0xdd, 0x0e,
- 0xd7, 0x74, 0x42, 0xd8, 0xa4, 0x4b, 0xb7, 0x53, 0x8d, 0x29, 0x44, 0x86, 0x98, 0x38, 0x43, 0x6d,
- 0x27, 0xae, 0x09, 0xe1, 0x30, 0xca, 0xa0, 0x31, 0x47, 0x85, 0x61, 0x93, 0x41, 0xd3, 0xdc, 0x11,
- 0x3e, 0x03, 0xc3, 0x84, 0x0e, 0x21, 0x25, 0x2c, 0x2e, 0x98, 0x0b, 0xbd, 0xf5, 0x75, 0xc5, 0xad,
- 0x05, 0xbe, 0xa9, 0xc7, 0x5b, 0x92, 0x94, 0x70, 0x4c, 0x14, 0x2d, 0xc0, 0x40, 0x48, 0x02, 0x57,
- 0xe9, 0x0a, 0x3a, 0x4c, 0x23, 0x43, 0xe3, 0xce, 0x90, 0xfc, 0x37, 0x16, 0x55, 0xe9, 0xf2, 0x72,
- 0x98, 0x28, 0x96, 0x5d, 0x06, 0xda, 0xf2, 0x9a, 0x63, 0xa5, 0x58, 0x40, 0xd1, 0x9b, 0x30, 0x18,
- 0x90, 0x06, 0x53, 0x14, 0x8f, 0xf5, 0xbe, 0xc8, 0xb9, 0xde, 0x99, 0xd7, 0xc3, 0x92, 0x00, 0xba,
- 0x0a, 0x28, 0x20, 0x94, 0xc1, 0x73, 0xbd, 0x0d, 0x65, 0xbe, 0x2f, 0x0e, 0x5a, 0xc5, 0x48, 0xe3,
- 0x18, 0x43, 0xfa, 0xc1, 0xe2, 0x8c, 0x6a, 0xe8, 0x32, 0x4c, 0xa9, 0xd2, 0xb2, 0x17, 0x46, 0x0e,
- 0x3d, 0xe0, 0xb8, 0xb8, 0x5e, 0xc9, 0x57, 0x70, 0x12, 0x01, 0xa7, 0xeb, 0xd8, 0xbf, 0x61, 0x01,
- 0x1f, 0xe7, 0x43, 0x90, 0x2a, 0xbc, 0x6e, 0x4a, 0x15, 0x4e, 0xe6, 0xce, 0x5c, 0x8e, 0x44, 0xe1,
- 0x37, 0x2c, 0x18, 0xd1, 0x66, 0x36, 0x5e, 0xb3, 0x56, 0x87, 0x35, 0xdb, 0x86, 0x49, 0xba, 0xd2,
- 0xaf, 0xdf, 0x0e, 0x49, 0xb0, 0x4d, 0xea, 0x6c, 0x61, 0x16, 0xee, 0x6f, 0x61, 0x2a, 0x53, 0xe1,
- 0x6b, 0x09, 0x82, 0x38, 0xd5, 0x84, 0xfd, 0x19, 0xd9, 0x55, 0x65, 0x59, 0x5d, 0x53, 0x73, 0x9e,
- 0xb0, 0xac, 0x56, 0xb3, 0x8a, 0x63, 0x1c, 0xba, 0xd5, 0x36, 0xfd, 0x30, 0x4a, 0x5a, 0x56, 0x5f,
- 0xf1, 0xc3, 0x08, 0x33, 0x88, 0xfd, 0x02, 0xc0, 0xd2, 0x5d, 0x52, 0xe3, 0x2b, 0x56, 0x7f, 0xf4,
- 0x58, 0xf9, 0x8f, 0x1e, 0xfb, 0x6f, 0x2c, 0x18, 0x5f, 0x5e, 0x30, 0x6e, 0xae, 0x59, 0x00, 0xfe,
- 0x52, 0xbb, 0x75, 0x6b, 0x55, 0x9a, 0xf7, 0x70, 0x0b, 0x07, 0x55, 0x8a, 0x35, 0x0c, 0x74, 0x12,
- 0x8a, 0x8d, 0xb6, 0x27, 0xc4, 0x9e, 0x83, 0xf4, 0x7a, 0xbc, 0xd6, 0xf6, 0x30, 0x2d, 0xd3, 0x7c,
- 0xe0, 0x8a, 0x3d, 0xfb, 0xc0, 0x75, 0x0d, 0xc5, 0x83, 0x4a, 0xd0, 0x7f, 0xe7, 0x8e, 0x5b, 0xe7,
- 0x11, 0x06, 0x84, 0xe9, 0xd1, 0xad, 0x5b, 0xe5, 0xc5, 0x10, 0xf3, 0x72, 0xfb, 0xcb, 0x45, 0x98,
- 0x59, 0x6e, 0x90, 0xbb, 0xef, 0x31, 0xca, 0x42, 0xaf, 0x1e, 0x7c, 0x07, 0x13, 0x20, 0x1d, 0xd4,
- 0x4b, 0xb3, 0xfb, 0x78, 0xac, 0xc3, 0x20, 0x37, 0x2c, 0x96, 0x31, 0x17, 0x32, 0xd5, 0xb9, 0xf9,
- 0x03, 0x32, 0xcb, 0x0d, 0x94, 0x85, 0x3a, 0x57, 0x5d, 0x98, 0xa2, 0x14, 0x4b, 0xe2, 0x33, 0xaf,
- 0xc0, 0xa8, 0x8e, 0x79, 0x20, 0x7f, 0xe9, 0x1f, 0x2e, 0xc2, 0x24, 0xed, 0xc1, 0x43, 0x9d, 0x88,
- 0x1b, 0xe9, 0x89, 0x78, 0xd0, 0x3e, 0xb3, 0xdd, 0x67, 0xe3, 0x9d, 0xe4, 0x6c, 0x5c, 0xcc, 0x9b,
- 0x8d, 0xc3, 0x9e, 0x83, 0x1f, 0xb1, 0xe0, 0xc8, 0x72, 0xc3, 0xaf, 0x6d, 0x25, 0xfc, 0x5a, 0x5f,
- 0x82, 0x11, 0x7a, 0x1c, 0x87, 0x46, 0x88, 0x17, 0x23, 0xe8, 0x8f, 0x00, 0x61, 0x1d, 0x4f, 0xab,
- 0x76, 0xe3, 0x46, 0x79, 0x31, 0x2b, 0x56, 0x90, 0x00, 0x61, 0x1d, 0xcf, 0xfe, 0x4b, 0x0b, 0x4e,
- 0x5f, 0x5e, 0x58, 0x8a, 0x97, 0x62, 0x2a, 0x5c, 0xd1, 0x39, 0x18, 0x68, 0xd5, 0xb5, 0xae, 0xc4,
- 0x62, 0xe1, 0x45, 0xd6, 0x0b, 0x01, 0x7d, 0xbf, 0x44, 0x06, 0xbb, 0x01, 0x70, 0x19, 0x57, 0x16,
- 0xc4, 0xb9, 0x2b, 0xb5, 0x40, 0x56, 0xae, 0x16, 0xe8, 0x09, 0x18, 0xa4, 0xf7, 0x82, 0x5b, 0x93,
- 0xfd, 0xe6, 0x06, 0x1b, 0xbc, 0x08, 0x4b, 0x98, 0xfd, 0xeb, 0x16, 0x1c, 0xb9, 0xec, 0x46, 0xf4,
- 0xd2, 0x4e, 0xc6, 0xe3, 0xa1, 0xb7, 0x76, 0xe8, 0x46, 0x7e, 0xb0, 0x93, 0x8c, 0xc7, 0x83, 0x15,
- 0x04, 0x6b, 0x58, 0xfc, 0x83, 0xb6, 0x5d, 0xe6, 0x29, 0x53, 0x30, 0xf5, 0x6e, 0x58, 0x94, 0x63,
- 0x85, 0x41, 0xc7, 0xab, 0xee, 0x06, 0x4c, 0x64, 0xb9, 0x23, 0x0e, 0x6e, 0x35, 0x5e, 0x8b, 0x12,
- 0x80, 0x63, 0x1c, 0xfb, 0x9f, 0x2c, 0x28, 0x5d, 0xe6, 0xfe, 0xbe, 0xeb, 0x61, 0xce, 0xa1, 0xfb,
- 0x02, 0x0c, 0x13, 0xa9, 0x20, 0x10, 0xbd, 0x56, 0x8c, 0xa8, 0xd2, 0x1c, 0xf0, 0xb0, 0x40, 0x0a,
- 0xaf, 0x07, 0xe7, 0xfb, 0x83, 0x79, 0x4f, 0x2f, 0x03, 0x22, 0x7a, 0x5b, 0x7a, 0x9c, 0x24, 0x16,
- 0x70, 0x65, 0x29, 0x05, 0xc5, 0x19, 0x35, 0xec, 0x9f, 0xb7, 0xe0, 0x98, 0xfa, 0xe0, 0xf7, 0xdd,
- 0x67, 0xda, 0xbf, 0x53, 0x80, 0xb1, 0x2b, 0x6b, 0x6b, 0x95, 0xcb, 0x24, 0xd2, 0x56, 0x65, 0x67,
- 0xb5, 0x3f, 0xd6, 0xb4, 0x97, 0x9d, 0xde, 0x88, 0xed, 0xc8, 0x6d, 0xcc, 0xf2, 0xe8, 0x7f, 0xb3,
- 0x65, 0x2f, 0xba, 0x1e, 0x54, 0xa3, 0xc0, 0xf5, 0x36, 0x32, 0x57, 0xba, 0xe4, 0x59, 0x8a, 0x79,
- 0x3c, 0x0b, 0x7a, 0x01, 0x06, 0x58, 0xf8, 0x41, 0x39, 0x09, 0x8f, 0xa8, 0x27, 0x16, 0x2b, 0xdd,
- 0xdf, 0x2d, 0x0d, 0xdf, 0xc0, 0x65, 0xfe, 0x07, 0x0b, 0x54, 0x74, 0x03, 0x46, 0x36, 0xa3, 0xa8,
- 0x75, 0x85, 0x38, 0x75, 0x12, 0xc8, 0x53, 0xf6, 0x4c, 0xd6, 0x29, 0x4b, 0x07, 0x81, 0xa3, 0xc5,
- 0x07, 0x53, 0x5c, 0x16, 0x62, 0x9d, 0x8e, 0x5d, 0x05, 0x88, 0x61, 0x0f, 0x48, 0x71, 0x63, 0xaf,
- 0xc1, 0x30, 0xfd, 0xdc, 0xb9, 0x86, 0xeb, 0x74, 0x56, 0x8d, 0x3f, 0x03, 0xc3, 0x52, 0xf1, 0x1d,
- 0x8a, 0xe0, 0x20, 0xec, 0x46, 0x92, 0x7a, 0xf1, 0x10, 0xc7, 0x70, 0xfb, 0x71, 0x10, 0xb6, 0xc3,
- 0x9d, 0x48, 0xda, 0xeb, 0x70, 0x94, 0x19, 0x41, 0x3b, 0xd1, 0xa6, 0xb1, 0x46, 0xbb, 0x2f, 0x86,
- 0x67, 0xc5, 0xbb, 0xae, 0xa0, 0xec, 0x7d, 0xa4, 0xf3, 0xf9, 0xa8, 0xa4, 0x18, 0xbf, 0xf1, 0xec,
- 0x7f, 0xec, 0x83, 0x47, 0xca, 0xd5, 0xfc, 0xa8, 0x56, 0x97, 0x60, 0x94, 0xb3, 0x8b, 0x74, 0x69,
- 0x38, 0x0d, 0xd1, 0xae, 0x92, 0x80, 0xae, 0x69, 0x30, 0x6c, 0x60, 0xa2, 0xd3, 0x50, 0x74, 0xdf,
- 0xf5, 0x92, 0xae, 0x99, 0xe5, 0xb7, 0x56, 0x31, 0x2d, 0xa7, 0x60, 0xca, 0x79, 0xf2, 0x23, 0x5d,
- 0x81, 0x15, 0xf7, 0xf9, 0x3a, 0x8c, 0xbb, 0x61, 0x2d, 0x74, 0xcb, 0x1e, 0xdd, 0xa7, 0xda, 0x4e,
- 0x57, 0x32, 0x07, 0xda, 0x69, 0x05, 0xc5, 0x09, 0x6c, 0xed, 0x7e, 0xe9, 0xef, 0x99, 0x7b, 0xed,
- 0x1a, 0x53, 0x83, 0x1e, 0xff, 0x2d, 0xf6, 0x75, 0x21, 0x13, 0xc1, 0x8b, 0xe3, 0x9f, 0x7f, 0x70,
- 0x88, 0x25, 0x8c, 0x3e, 0xe8, 0x6a, 0x9b, 0x4e, 0x6b, 0xae, 0x1d, 0x6d, 0x2e, 0xba, 0x61, 0xcd,
- 0xdf, 0x26, 0xc1, 0x0e, 0x7b, 0x8b, 0x0f, 0xc5, 0x0f, 0x3a, 0x05, 0x58, 0xb8, 0x32, 0x57, 0xa1,
- 0x98, 0x38, 0x5d, 0x07, 0xcd, 0xc1, 0x84, 0x2c, 0xac, 0x92, 0x90, 0x5d, 0x01, 0x23, 0x8c, 0x8c,
- 0x72, 0x96, 0x14, 0xc5, 0x8a, 0x48, 0x12, 0xdf, 0x64, 0x70, 0xe1, 0x41, 0x30, 0xb8, 0x2f, 0xc3,
- 0x98, 0xeb, 0xb9, 0x91, 0xeb, 0x44, 0x3e, 0xd7, 0x1f, 0xf1, 0x67, 0x37, 0x13, 0x30, 0x97, 0x75,
- 0x00, 0x36, 0xf1, 0xec, 0xff, 0xb3, 0x0f, 0xa6, 0xd8, 0xb4, 0x7d, 0xb0, 0xc2, 0xbe, 0x97, 0x56,
- 0xd8, 0x8d, 0xf4, 0x0a, 0x7b, 0x10, 0x9c, 0xfb, 0x7d, 0x2f, 0xb3, 0x2f, 0x58, 0x30, 0xc5, 0x64,
- 0xdc, 0xc6, 0x32, 0xbb, 0x00, 0xc3, 0x81, 0xe1, 0xc7, 0x3a, 0xac, 0x2b, 0xb5, 0xa4, 0x4b, 0x6a,
- 0x8c, 0x83, 0xde, 0x00, 0x68, 0xc5, 0x32, 0xf4, 0x82, 0x11, 0x7c, 0x14, 0x72, 0xc5, 0xe7, 0x5a,
- 0x1d, 0xfb, 0xb3, 0x30, 0xac, 0x1c, 0x55, 0xa5, 0xa7, 0xba, 0x95, 0xe3, 0xa9, 0xde, 0x9d, 0x8d,
- 0x90, 0xb6, 0x71, 0xc5, 0x4c, 0xdb, 0xb8, 0xff, 0xcb, 0x82, 0x58, 0xc3, 0x81, 0xde, 0x82, 0xe1,
- 0x96, 0xcf, 0x4c, 0xa9, 0x03, 0xe9, 0x9f, 0xf0, 0x78, 0x47, 0x15, 0x09, 0x8f, 0x30, 0x18, 0xf0,
- 0xe9, 0xa8, 0xc8, 0xaa, 0x38, 0xa6, 0x82, 0xae, 0xc2, 0x60, 0x2b, 0x20, 0xd5, 0x88, 0x85, 0xbf,
- 0xea, 0x9d, 0x20, 0x5f, 0xbe, 0xbc, 0x22, 0x96, 0x14, 0x12, 0x96, 0xa9, 0xc5, 0xde, 0x2d, 0x53,
- 0xed, 0xdf, 0x2a, 0xc0, 0x64, 0xb2, 0x11, 0xf4, 0x1a, 0xf4, 0x91, 0xbb, 0xa4, 0x26, 0xbe, 0x34,
- 0x93, 0x9b, 0x88, 0xa5, 0x2b, 0x7c, 0xe8, 0xe8, 0x7f, 0xcc, 0x6a, 0xa1, 0x2b, 0x30, 0x48, 0x59,
- 0x89, 0xcb, 0x2a, 0x48, 0xe4, 0xa3, 0x79, 0xec, 0x88, 0xe2, 0xc9, 0xf8, 0x67, 0x89, 0x22, 0x2c,
- 0xab, 0x33, 0x53, 0xb6, 0x5a, 0xab, 0x4a, 0x5f, 0x69, 0x51, 0x27, 0x61, 0xc2, 0xda, 0x42, 0x85,
- 0x23, 0x09, 0x6a, 0xdc, 0x94, 0x4d, 0x16, 0xe2, 0x98, 0x08, 0x7a, 0x03, 0xfa, 0xc3, 0x06, 0x21,
- 0x2d, 0x61, 0xab, 0x90, 0x29, 0x1f, 0xad, 0x52, 0x04, 0x41, 0x89, 0xc9, 0x53, 0x58, 0x01, 0xe6,
- 0x15, 0xed, 0xdf, 0xb5, 0x00, 0xb8, 0xed, 0x9f, 0xe3, 0x6d, 0x90, 0x43, 0x50, 0x29, 0x2c, 0x42,
- 0x5f, 0xd8, 0x22, 0xb5, 0x4e, 0x1e, 0x06, 0x71, 0x7f, 0xaa, 0x2d, 0x52, 0x8b, 0x57, 0x3b, 0xfd,
- 0x87, 0x59, 0x6d, 0xfb, 0x47, 0x01, 0xc6, 0x63, 0xb4, 0x72, 0x44, 0x9a, 0xe8, 0x39, 0x23, 0xb2,
- 0xce, 0xc9, 0x44, 0x64, 0x9d, 0x61, 0x86, 0xad, 0x49, 0xaf, 0x3f, 0x0b, 0xc5, 0xa6, 0x73, 0x57,
- 0x88, 0x27, 0x9f, 0xe9, 0xdc, 0x0d, 0x4a, 0x7f, 0x76, 0xc5, 0xb9, 0xcb, 0x5f, 0xf0, 0xcf, 0xc8,
- 0xdd, 0xb9, 0xe2, 0xdc, 0xed, 0x6a, 0x05, 0x4f, 0x1b, 0x61, 0x6d, 0xb9, 0x9e, 0x30, 0x6b, 0xeb,
- 0xa9, 0x2d, 0xd7, 0x4b, 0xb6, 0xe5, 0x7a, 0x3d, 0xb4, 0xe5, 0x7a, 0xe8, 0x1e, 0x0c, 0x0a, 0xab,
- 0x53, 0x11, 0xf2, 0xef, 0x42, 0x0f, 0xed, 0x09, 0xa3, 0x55, 0xde, 0xe6, 0x05, 0x29, 0xa1, 0x10,
- 0xa5, 0x5d, 0xdb, 0x95, 0x0d, 0xa2, 0xff, 0xd4, 0x82, 0x71, 0xf1, 0x1b, 0x93, 0x77, 0xdb, 0x24,
- 0x8c, 0x04, 0x07, 0xff, 0x91, 0xde, 0xfb, 0x20, 0x2a, 0xf2, 0xae, 0x7c, 0x44, 0x5e, 0xb6, 0x26,
- 0xb0, 0x6b, 0x8f, 0x12, 0xbd, 0x40, 0xbf, 0x65, 0xc1, 0xd1, 0xa6, 0x73, 0x97, 0xb7, 0xc8, 0xcb,
- 0xb0, 0x13, 0xb9, 0xbe, 0xb0, 0xde, 0x78, 0xad, 0xb7, 0xe9, 0x4f, 0x55, 0xe7, 0x9d, 0x94, 0xaa,
- 0xda, 0xa3, 0x59, 0x28, 0x5d, 0xbb, 0x9a, 0xd9, 0xaf, 0x99, 0x75, 0x18, 0x92, 0xeb, 0xed, 0x61,
- 0x9a, 0xd4, 0xb3, 0x76, 0xc4, 0x5a, 0x7b, 0xa8, 0xed, 0x7c, 0x16, 0x46, 0xf5, 0x35, 0xf6, 0x50,
- 0xdb, 0x7a, 0x17, 0x8e, 0x64, 0xac, 0xa5, 0x87, 0xda, 0xe4, 0x1d, 0x38, 0x99, 0xbb, 0x3e, 0x1e,
- 0xaa, 0x4b, 0xc4, 0xef, 0x58, 0xfa, 0x39, 0x78, 0x08, 0x7a, 0x9d, 0x05, 0x53, 0xaf, 0x73, 0xa6,
- 0xf3, 0xce, 0xc9, 0x51, 0xee, 0xbc, 0xa3, 0x77, 0x9a, 0x9e, 0xea, 0xe8, 0x4d, 0x18, 0x68, 0xd0,
- 0x12, 0x69, 0xbb, 0x6c, 0x77, 0xdf, 0x91, 0x31, 0x47, 0xcd, 0xca, 0x43, 0x2c, 0x28, 0xd8, 0x5f,
- 0xb1, 0x20, 0xc3, 0xa9, 0x83, 0x72, 0x58, 0x6d, 0xb7, 0xce, 0x86, 0xa4, 0x18, 0x73, 0x58, 0x2a,
- 0xf0, 0xcc, 0x69, 0x28, 0x6e, 0xb8, 0x75, 0xe1, 0xcd, 0xac, 0xc0, 0x97, 0x29, 0x78, 0xc3, 0xad,
- 0xa3, 0x65, 0x40, 0x61, 0xbb, 0xd5, 0x6a, 0x30, 0x83, 0x27, 0xa7, 0x71, 0x39, 0xf0, 0xdb, 0x2d,
- 0x6e, 0xa8, 0x5c, 0xe4, 0xe2, 0xa5, 0x6a, 0x0a, 0x8a, 0x33, 0x6a, 0xd8, 0x7f, 0x60, 0x41, 0xdf,
- 0x21, 0x4c, 0x13, 0x36, 0xa7, 0xe9, 0xb9, 0x5c, 0xd2, 0x22, 0x53, 0xc4, 0x2c, 0x76, 0xee, 0x2c,
- 0xdd, 0x8d, 0x88, 0x17, 0x32, 0x86, 0x23, 0x73, 0xd6, 0x76, 0x2d, 0x38, 0x72, 0xcd, 0x77, 0xea,
- 0xf3, 0x4e, 0xc3, 0xf1, 0x6a, 0x24, 0x28, 0x7b, 0x1b, 0x07, 0xf2, 0x0a, 0x28, 0x74, 0xf5, 0x0a,
- 0xb8, 0x04, 0x03, 0x6e, 0x4b, 0x0b, 0x35, 0x7f, 0x96, 0xce, 0x6e, 0xb9, 0x22, 0xa2, 0xcc, 0x23,
- 0xa3, 0x71, 0x56, 0x8a, 0x05, 0x3e, 0x5d, 0x96, 0xdc, 0x1c, 0xaf, 0x2f, 0x7f, 0x59, 0xd2, 0x57,
- 0x52, 0x32, 0x84, 0x9a, 0x61, 0x38, 0xbe, 0x09, 0x46, 0x13, 0xc2, 0x4d, 0x0a, 0xc3, 0xa0, 0xcb,
- 0xbf, 0x54, 0xac, 0xcd, 0x27, 0xb3, 0x5f, 0x2f, 0xa9, 0x81, 0xd1, 0xfc, 0x01, 0x79, 0x01, 0x96,
- 0x84, 0xec, 0x4b, 0x90, 0x19, 0xf2, 0xa6, 0xbb, 0x64, 0xca, 0xfe, 0x04, 0x4c, 0xb1, 0x9a, 0x07,
- 0x94, 0xfa, 0xd8, 0x09, 0x79, 0x7a, 0x46, 0xd4, 0x60, 0xfb, 0x7f, 0xb5, 0x00, 0xad, 0xf8, 0x75,
- 0x77, 0x7d, 0x47, 0x10, 0xe7, 0xdf, 0xff, 0x2e, 0x94, 0xf8, 0xb3, 0x3a, 0x19, 0x59, 0x77, 0xa1,
- 0xe1, 0x84, 0xa1, 0x26, 0xcb, 0x7f, 0x52, 0xb4, 0x5b, 0x5a, 0xeb, 0x8c, 0x8e, 0xbb, 0xd1, 0x43,
- 0x6f, 0x25, 0x02, 0x1d, 0x7e, 0x34, 0x15, 0xe8, 0xf0, 0xc9, 0x4c, 0x8b, 0x9a, 0x74, 0xef, 0x65,
- 0x00, 0x44, 0xfb, 0x8b, 0x16, 0x4c, 0xac, 0x26, 0x22, 0xc5, 0x9e, 0x63, 0xe6, 0x05, 0x19, 0x3a,
- 0xaa, 0x2a, 0x2b, 0xc5, 0x02, 0xfa, 0xc0, 0x65, 0xb8, 0xdf, 0xb6, 0x20, 0x0e, 0xb1, 0x75, 0x08,
- 0x2c, 0xf7, 0x82, 0xc1, 0x72, 0x67, 0x3e, 0x5f, 0x54, 0x77, 0xf2, 0x38, 0x6e, 0x74, 0x55, 0xcd,
- 0x49, 0x87, 0x97, 0x4b, 0x4c, 0x86, 0xef, 0xb3, 0x71, 0x73, 0xe2, 0xd4, 0x6c, 0x7c, 0xa3, 0x00,
- 0x48, 0xe1, 0xf6, 0x1c, 0x1c, 0x33, 0x5d, 0xe3, 0xc1, 0x04, 0xc7, 0xdc, 0x06, 0xc4, 0x0c, 0x64,
- 0x02, 0xc7, 0x0b, 0x39, 0x59, 0x57, 0x48, 0xad, 0x0f, 0x66, 0x7d, 0x33, 0x23, 0xbd, 0x65, 0xaf,
- 0xa5, 0xa8, 0xe1, 0x8c, 0x16, 0x34, 0xc3, 0xa7, 0xfe, 0x5e, 0x0d, 0x9f, 0x06, 0xba, 0xb8, 0x7d,
- 0x7f, 0xcd, 0x82, 0x31, 0x35, 0x4c, 0xef, 0x13, 0xe7, 0x11, 0xd5, 0x9f, 0x9c, 0x7b, 0xa5, 0xa2,
- 0x75, 0x99, 0x31, 0x03, 0xdf, 0xc7, 0xdc, 0xf7, 0x9d, 0x86, 0x7b, 0x8f, 0xa8, 0x18, 0xce, 0x25,
- 0xe1, 0x8e, 0x2f, 0x4a, 0xf7, 0x77, 0x4b, 0x63, 0xea, 0x1f, 0x8f, 0x1a, 0x1b, 0x57, 0xb1, 0x7f,
- 0x99, 0x6e, 0x76, 0x73, 0x29, 0xa2, 0x97, 0xa0, 0xbf, 0xb5, 0xe9, 0x84, 0x24, 0xe1, 0x64, 0xd7,
- 0x5f, 0xa1, 0x85, 0xfb, 0xbb, 0xa5, 0x71, 0x55, 0x81, 0x95, 0x60, 0x8e, 0xdd, 0x7b, 0xc8, 0xd1,
- 0xf4, 0xe2, 0xec, 0x1a, 0x72, 0xf4, 0x5f, 0x2c, 0xe8, 0x5b, 0xa5, 0xb7, 0xd7, 0xc3, 0x3f, 0x02,
- 0x5e, 0x37, 0x8e, 0x80, 0x53, 0x79, 0xd9, 0x8c, 0x72, 0x77, 0xff, 0x72, 0x62, 0xf7, 0x9f, 0xc9,
- 0xa5, 0xd0, 0x79, 0xe3, 0x37, 0x61, 0x84, 0xe5, 0x48, 0x12, 0x0e, 0x85, 0x2f, 0x18, 0x1b, 0xbe,
- 0x94, 0xd8, 0xf0, 0x13, 0x1a, 0xaa, 0xb6, 0xd3, 0x9f, 0x82, 0x41, 0xe1, 0xa1, 0x96, 0x8c, 0x82,
- 0x20, 0x70, 0xb1, 0x84, 0xdb, 0xbf, 0x50, 0x04, 0x23, 0x27, 0x13, 0xfa, 0x23, 0x0b, 0x66, 0x03,
- 0x6e, 0xb9, 0x5e, 0x5f, 0x6c, 0x07, 0xae, 0xb7, 0x51, 0xad, 0x6d, 0x92, 0x7a, 0xbb, 0xe1, 0x7a,
- 0x1b, 0xe5, 0x0d, 0xcf, 0x57, 0xc5, 0x4b, 0x77, 0x49, 0xad, 0xcd, 0xb4, 0xca, 0x5d, 0x12, 0x40,
- 0x29, 0x0f, 0x90, 0xe7, 0xf7, 0x76, 0x4b, 0xb3, 0xf8, 0x40, 0xb4, 0xf1, 0x01, 0xfb, 0x82, 0xfe,
- 0xd2, 0x82, 0x0b, 0x3c, 0x37, 0x50, 0xef, 0xfd, 0xef, 0x20, 0xe1, 0xa8, 0x48, 0x52, 0x31, 0x91,
- 0x35, 0x12, 0x34, 0xe7, 0x5f, 0x16, 0x03, 0x7a, 0xa1, 0x72, 0xb0, 0xb6, 0xf0, 0x41, 0x3b, 0x67,
- 0xff, 0xb7, 0x45, 0x18, 0x13, 0xa1, 0x29, 0xc5, 0x1d, 0xf0, 0x92, 0xb1, 0x24, 0x1e, 0x4d, 0x2c,
- 0x89, 0x29, 0x03, 0xf9, 0xc1, 0x1c, 0xff, 0x21, 0x4c, 0xd1, 0xc3, 0xf9, 0x0a, 0x71, 0x82, 0xe8,
- 0x36, 0x71, 0xb8, 0x3d, 0x63, 0xf1, 0xc0, 0xa7, 0xbf, 0x12, 0xac, 0x5f, 0x4b, 0x12, 0xc3, 0x69,
- 0xfa, 0xdf, 0x4b, 0x77, 0x8e, 0x07, 0x93, 0xa9, 0xe8, 0xa2, 0x6f, 0xc3, 0xb0, 0x72, 0xaf, 0x12,
- 0x87, 0x4e, 0xe7, 0x20, 0xbd, 0x49, 0x0a, 0x5c, 0xe8, 0x19, 0xbb, 0xf6, 0xc5, 0xe4, 0xec, 0xdf,
- 0x2e, 0x18, 0x0d, 0xf2, 0x49, 0x5c, 0x85, 0x21, 0x27, 0x64, 0x81, 0xc3, 0xeb, 0x9d, 0x24, 0xda,
- 0xa9, 0x66, 0x98, 0x8b, 0xdb, 0x9c, 0xa8, 0x89, 0x15, 0x0d, 0x74, 0x85, 0x5b, 0x8d, 0x6e, 0x93,
- 0x4e, 0xe2, 0xec, 0x14, 0x35, 0x90, 0x76, 0xa5, 0xdb, 0x04, 0x8b, 0xfa, 0xe8, 0x53, 0xdc, 0xac,
- 0xf7, 0xaa, 0xe7, 0xdf, 0xf1, 0x2e, 0xfb, 0xbe, 0x0c, 0x43, 0xd4, 0x1b, 0xc1, 0x29, 0x69, 0xcc,
- 0xab, 0xaa, 0x63, 0x93, 0x5a, 0x6f, 0xe1, 0xba, 0x3f, 0x07, 0x2c, 0x17, 0x8a, 0x19, 0xcd, 0x20,
- 0x44, 0x04, 0x26, 0x44, 0xdc, 0x53, 0x59, 0x26, 0xc6, 0x2e, 0xf3, 0xf9, 0x6d, 0xd6, 0x8e, 0x35,
- 0x40, 0x57, 0x4d, 0x12, 0x38, 0x49, 0xd3, 0xde, 0xe4, 0x87, 0xf0, 0x32, 0x71, 0xa2, 0x76, 0x40,
- 0x42, 0xf4, 0x71, 0x98, 0x4e, 0xbf, 0x8c, 0x85, 0x22, 0xc5, 0x62, 0xdc, 0xf3, 0xa9, 0xbd, 0xdd,
- 0xd2, 0x74, 0x35, 0x07, 0x07, 0xe7, 0xd6, 0xb6, 0x7f, 0xcd, 0x02, 0xe6, 0x43, 0x7e, 0x08, 0x9c,
- 0xcf, 0xc7, 0x4c, 0xce, 0x67, 0x3a, 0x6f, 0x3a, 0x73, 0x98, 0x9e, 0x17, 0xf9, 0x1a, 0xae, 0x04,
- 0xfe, 0xdd, 0x1d, 0x61, 0xf5, 0xd5, 0xfd, 0x19, 0x67, 0x7f, 0xd9, 0x02, 0x96, 0x38, 0x08, 0xf3,
- 0x57, 0xbb, 0x54, 0x70, 0x74, 0x37, 0x68, 0xf8, 0x38, 0x0c, 0xad, 0x8b, 0xe1, 0xcf, 0x10, 0x3a,
- 0x19, 0x1d, 0x36, 0x69, 0xcb, 0x49, 0x13, 0xbe, 0xa0, 0xe2, 0x1f, 0x56, 0xd4, 0xec, 0xff, 0xd2,
- 0x82, 0x99, 0xfc, 0x6a, 0xe8, 0x06, 0x9c, 0x08, 0x48, 0xad, 0x1d, 0x84, 0x74, 0x4b, 0x88, 0x07,
- 0x90, 0x70, 0xa7, 0xe2, 0x53, 0xfd, 0xc8, 0xde, 0x6e, 0xe9, 0x04, 0xce, 0x46, 0xc1, 0x79, 0x75,
- 0xd1, 0x2b, 0x30, 0xde, 0x0e, 0x39, 0xe7, 0xc7, 0x98, 0xae, 0x50, 0x44, 0xa7, 0x66, 0x1e, 0x47,
- 0x37, 0x0c, 0x08, 0x4e, 0x60, 0xda, 0x3f, 0xc0, 0x97, 0xa3, 0x0a, 0x50, 0xdd, 0x84, 0x29, 0x4f,
- 0xfb, 0x4f, 0x6f, 0x40, 0xf9, 0xd4, 0x7f, 0xbc, 0xdb, 0xad, 0xcf, 0xae, 0x4b, 0xcd, 0xcb, 0x3d,
- 0x41, 0x06, 0xa7, 0x29, 0xdb, 0xbf, 0x68, 0xc1, 0x09, 0x1d, 0x51, 0x73, 0xa4, 0xeb, 0xa6, 0x05,
- 0x5c, 0x84, 0x21, 0xbf, 0x45, 0x02, 0x27, 0xf2, 0x03, 0x71, 0xcd, 0x9d, 0x97, 0x2b, 0xf4, 0xba,
- 0x28, 0xdf, 0x17, 0x09, 0x73, 0x24, 0x75, 0x59, 0x8e, 0x55, 0x4d, 0x64, 0xc3, 0x00, 0x13, 0x20,
- 0x86, 0xc2, 0x65, 0x92, 0x1d, 0x5a, 0xcc, 0xb2, 0x25, 0xc4, 0x02, 0x62, 0xff, 0xa3, 0xc5, 0xd7,
- 0xa7, 0xde, 0x75, 0xf4, 0x2e, 0x4c, 0x36, 0x9d, 0xa8, 0xb6, 0xb9, 0x74, 0xb7, 0x15, 0x70, 0xe5,
- 0xae, 0x1c, 0xa7, 0x67, 0xba, 0x8d, 0x93, 0xf6, 0x91, 0xb1, 0x69, 0xf5, 0x4a, 0x82, 0x18, 0x4e,
- 0x91, 0x47, 0xb7, 0x61, 0x84, 0x95, 0x31, 0x6f, 0xe0, 0xb0, 0x13, 0x2f, 0x93, 0xd7, 0x9a, 0x32,
- 0x0e, 0x5a, 0x89, 0xe9, 0x60, 0x9d, 0xa8, 0xfd, 0xd5, 0x22, 0x3f, 0x34, 0xd8, 0xdb, 0xe3, 0x29,
- 0x18, 0x6c, 0xf9, 0xf5, 0x85, 0xf2, 0x22, 0x16, 0xb3, 0xa0, 0xee, 0xbd, 0x0a, 0x2f, 0xc6, 0x12,
- 0x8e, 0xce, 0xc3, 0x90, 0xf8, 0x29, 0x95, 0xf1, 0x6c, 0x8f, 0x08, 0xbc, 0x10, 0x2b, 0x28, 0x7a,
- 0x1e, 0xa0, 0x15, 0xf8, 0xdb, 0x6e, 0x9d, 0x45, 0x7f, 0x2a, 0x9a, 0x76, 0x7d, 0x15, 0x05, 0xc1,
- 0x1a, 0x16, 0x7a, 0x15, 0xc6, 0xda, 0x5e, 0xc8, 0xf9, 0x27, 0x2d, 0xc6, 0xbe, 0xb2, 0x38, 0xbb,
- 0xa1, 0x03, 0xb1, 0x89, 0x8b, 0xe6, 0x60, 0x20, 0x72, 0x98, 0x9d, 0x5a, 0x7f, 0xbe, 0xf9, 0xfd,
- 0x1a, 0xc5, 0xd0, 0xb3, 0xd9, 0xd1, 0x0a, 0x58, 0x54, 0x44, 0x6f, 0x4b, 0xc7, 0x7c, 0x7e, 0x13,
- 0x09, 0xbf, 0x97, 0xde, 0x6e, 0x2d, 0xcd, 0x2d, 0x5f, 0xf8, 0xd3, 0x18, 0xb4, 0xd0, 0x2b, 0x00,
- 0xe4, 0x6e, 0x44, 0x02, 0xcf, 0x69, 0x28, 0xeb, 0x52, 0xc5, 0xc8, 0x2c, 0xfa, 0xab, 0x7e, 0x74,
- 0x23, 0x24, 0x4b, 0x0a, 0x03, 0x6b, 0xd8, 0xf6, 0x8f, 0x8e, 0x00, 0xc4, 0x0f, 0x0d, 0x74, 0x0f,
- 0x86, 0x6a, 0x4e, 0xcb, 0xa9, 0xf1, 0x54, 0xad, 0xc5, 0x3c, 0x7f, 0xe9, 0xb8, 0xc6, 0xec, 0x82,
- 0x40, 0xe7, 0xca, 0x1b, 0x19, 0xa6, 0x7c, 0x48, 0x16, 0x77, 0x55, 0xd8, 0xa8, 0xf6, 0xd0, 0x17,
- 0x2c, 0x18, 0x11, 0xd1, 0x95, 0xd8, 0x0c, 0x15, 0xf2, 0xf5, 0x6d, 0x5a, 0xfb, 0x73, 0x71, 0x0d,
- 0xde, 0x85, 0x17, 0xe4, 0x0a, 0xd5, 0x20, 0x5d, 0x7b, 0xa1, 0x37, 0x8c, 0x3e, 0x2c, 0xdf, 0xb6,
- 0x45, 0x63, 0x28, 0xd5, 0xdb, 0x76, 0x98, 0x5d, 0x35, 0xfa, 0xb3, 0xf6, 0x86, 0xf1, 0xac, 0xed,
- 0xcb, 0xf7, 0x3c, 0x36, 0xf8, 0xed, 0x6e, 0x2f, 0x5a, 0x54, 0xd1, 0xa3, 0x90, 0xf4, 0xe7, 0xbb,
- 0xcb, 0x6a, 0x0f, 0xbb, 0x2e, 0x11, 0x48, 0x3e, 0x0b, 0x13, 0x75, 0x93, 0x6b, 0x11, 0x2b, 0xf1,
- 0xc9, 0x3c, 0xba, 0x09, 0x26, 0x27, 0xe6, 0x53, 0x12, 0x00, 0x9c, 0x24, 0x8c, 0x2a, 0x3c, 0x28,
- 0x4d, 0xd9, 0x5b, 0xf7, 0x85, 0xef, 0x95, 0x9d, 0x3b, 0x97, 0x3b, 0x61, 0x44, 0x9a, 0x14, 0x33,
- 0x66, 0x12, 0x56, 0x45, 0x5d, 0xac, 0xa8, 0xa0, 0x37, 0x61, 0x80, 0xf9, 0x4b, 0x86, 0xd3, 0x43,
- 0xf9, 0x6a, 0x0d, 0x33, 0xfa, 0x6a, 0xbc, 0x21, 0xd9, 0xdf, 0x10, 0x0b, 0x0a, 0xe8, 0x8a, 0xf4,
- 0x46, 0x0e, 0xcb, 0xde, 0x8d, 0x90, 0x30, 0x6f, 0xe4, 0xe1, 0xf9, 0xc7, 0x63, 0x47, 0x63, 0x5e,
- 0x9e, 0x99, 0xf3, 0xd6, 0xa8, 0x49, 0xd9, 0x3e, 0xf1, 0x5f, 0xa6, 0xd2, 0x15, 0xb1, 0xe2, 0x32,
- 0xbb, 0x67, 0xa6, 0xdb, 0x8d, 0x87, 0xf3, 0xa6, 0x49, 0x02, 0x27, 0x69, 0x52, 0x16, 0x9a, 0xef,
- 0x7a, 0xe1, 0xbd, 0xd5, 0xed, 0xec, 0xe0, 0x92, 0x03, 0x76, 0x1b, 0xf1, 0x12, 0x2c, 0xea, 0x23,
- 0x17, 0x26, 0x02, 0x83, 0xbd, 0x90, 0x21, 0xde, 0xce, 0xf5, 0xc6, 0xc4, 0x68, 0xc9, 0x03, 0x4c,
- 0x32, 0x38, 0x49, 0x17, 0xbd, 0xa9, 0x31, 0x4a, 0x63, 0x9d, 0x5f, 0xfe, 0xdd, 0x58, 0xa3, 0x99,
- 0x2d, 0x18, 0x33, 0x0e, 0x9b, 0x87, 0xaa, 0x82, 0xf4, 0x60, 0x32, 0x79, 0xb2, 0x3c, 0x54, 0xcd,
- 0xe3, 0x2b, 0x30, 0xce, 0x36, 0xc2, 0x1d, 0xa7, 0x25, 0x8e, 0xe2, 0xf3, 0xc6, 0x51, 0x6c, 0x9d,
- 0x2f, 0xf2, 0x81, 0x91, 0x43, 0x10, 0x1f, 0x9c, 0xf6, 0xaf, 0xf4, 0x8b, 0xca, 0x6a, 0x17, 0xa1,
- 0x0b, 0x30, 0x2c, 0x3a, 0xa0, 0x32, 0x70, 0xa9, 0x83, 0x61, 0x45, 0x02, 0x70, 0x8c, 0xc3, 0x12,
- 0xaf, 0xb1, 0xea, 0x9a, 0x87, 0x42, 0x9c, 0x78, 0x4d, 0x41, 0xb0, 0x86, 0x45, 0x1f, 0xbf, 0xb7,
- 0x7d, 0x3f, 0x52, 0x77, 0xb0, 0xda, 0x6a, 0xf3, 0xac, 0x14, 0x0b, 0x28, 0xbd, 0x7b, 0xb7, 0x48,
- 0xe0, 0x91, 0x86, 0x99, 0x82, 0x42, 0xdd, 0xbd, 0x57, 0x75, 0x20, 0x36, 0x71, 0x29, 0x07, 0xe1,
- 0x87, 0x6c, 0xef, 0x8a, 0x27, 0x76, 0xec, 0xf1, 0x51, 0xe5, 0xb1, 0x2b, 0x24, 0x1c, 0x7d, 0x02,
- 0x4e, 0xa8, 0x70, 0x8f, 0x62, 0x65, 0xca, 0x16, 0x07, 0x0c, 0x89, 0xd8, 0x89, 0x85, 0x6c, 0x34,
- 0x9c, 0x57, 0x1f, 0xbd, 0x0e, 0xe3, 0xe2, 0x19, 0x26, 0x29, 0x0e, 0x9a, 0xe6, 0x8b, 0x57, 0x0d,
- 0x28, 0x4e, 0x60, 0xcb, 0x24, 0x1a, 0xec, 0x7d, 0x22, 0x29, 0x0c, 0xa5, 0x93, 0x68, 0xe8, 0x70,
- 0x9c, 0xaa, 0x81, 0xe6, 0x60, 0x82, 0xb3, 0x9d, 0xae, 0xb7, 0xc1, 0xe7, 0x44, 0xf8, 0x93, 0xaa,
- 0x0d, 0x79, 0xdd, 0x04, 0xe3, 0x24, 0x3e, 0xba, 0x04, 0xa3, 0x4e, 0x50, 0xdb, 0x74, 0x23, 0x52,
- 0xa3, 0xbb, 0x8a, 0x59, 0x10, 0x6a, 0xf6, 0x9f, 0x73, 0x1a, 0x0c, 0x1b, 0x98, 0xe8, 0x0d, 0xe8,
- 0x0b, 0xef, 0x38, 0x2d, 0x71, 0xfa, 0xe4, 0x1f, 0xe5, 0x6a, 0x05, 0x73, 0xd3, 0x2f, 0xfa, 0x1f,
- 0xb3, 0x9a, 0xf6, 0x3d, 0x38, 0x92, 0x11, 0x16, 0x87, 0x2e, 0x3d, 0xa7, 0xe5, 0xca, 0x51, 0x49,
- 0xb8, 0x69, 0xcc, 0x55, 0xca, 0x72, 0x3c, 0x34, 0x2c, 0xba, 0xbe, 0x59, 0xf8, 0x1c, 0x2d, 0xdd,
- 0xb8, 0x5a, 0xdf, 0xcb, 0x12, 0x80, 0x63, 0x1c, 0xfb, 0x5f, 0x0b, 0x30, 0x91, 0xa1, 0x1e, 0x64,
- 0x29, 0xaf, 0x13, 0xef, 0xbc, 0x38, 0xc3, 0xb5, 0x99, 0xd5, 0xa5, 0x70, 0x80, 0xac, 0x2e, 0xc5,
- 0x6e, 0x59, 0x5d, 0xfa, 0xde, 0x4b, 0x56, 0x17, 0x73, 0xc4, 0xfa, 0x7b, 0x1a, 0xb1, 0x8c, 0x4c,
- 0x30, 0x03, 0x07, 0xcc, 0x04, 0x63, 0x0c, 0xfa, 0x60, 0x0f, 0x83, 0xfe, 0xd3, 0x05, 0x98, 0x4c,
- 0x6a, 0x16, 0x0f, 0x41, 0x3a, 0xff, 0xa6, 0x21, 0x9d, 0x3f, 0xdf, 0x4b, 0x04, 0x81, 0x5c, 0x49,
- 0x3d, 0x4e, 0x48, 0xea, 0x9f, 0xee, 0x89, 0x5a, 0x67, 0xa9, 0xfd, 0x2f, 0x15, 0xe0, 0x58, 0xa6,
- 0xc2, 0xf5, 0x10, 0xc6, 0xe6, 0xba, 0x31, 0x36, 0xcf, 0xf5, 0x1c, 0x5d, 0x21, 0x77, 0x80, 0x6e,
- 0x25, 0x06, 0xe8, 0x42, 0xef, 0x24, 0x3b, 0x8f, 0xd2, 0x37, 0x8b, 0x70, 0x26, 0xb3, 0x5e, 0x2c,
- 0xdc, 0x5e, 0x36, 0x84, 0xdb, 0xcf, 0x27, 0x84, 0xdb, 0x76, 0xe7, 0xda, 0x0f, 0x46, 0xda, 0x2d,
- 0xa2, 0x0c, 0xb0, 0x58, 0x29, 0xf7, 0x29, 0xe9, 0x36, 0xa2, 0x0c, 0x28, 0x42, 0xd8, 0xa4, 0xfb,
- 0xbd, 0x24, 0xe1, 0xfe, 0x1f, 0x2d, 0x38, 0x99, 0x39, 0x37, 0x87, 0x20, 0x67, 0x5c, 0x35, 0xe5,
- 0x8c, 0x4f, 0xf5, 0xbc, 0x5a, 0x73, 0x04, 0x8f, 0x5f, 0x1c, 0xc8, 0xf9, 0x16, 0x26, 0xfe, 0xb8,
- 0x0e, 0x23, 0x4e, 0xad, 0x46, 0xc2, 0x70, 0xc5, 0xaf, 0xab, 0x04, 0x10, 0xcf, 0xb1, 0xc7, 0x69,
- 0x5c, 0xbc, 0xbf, 0x5b, 0x9a, 0x49, 0x92, 0x88, 0xc1, 0x58, 0xa7, 0x80, 0x3e, 0x05, 0x43, 0xa1,
- 0xcc, 0xdd, 0xd9, 0x77, 0xff, 0xb9, 0x3b, 0x19, 0x27, 0xa9, 0xc4, 0x3b, 0x8a, 0x24, 0xfa, 0x7e,
- 0x3d, 0x6a, 0x55, 0x07, 0xc1, 0x26, 0xef, 0xe4, 0x7d, 0xc4, 0xae, 0x7a, 0x1e, 0x60, 0x5b, 0xbd,
- 0xa3, 0x92, 0xa2, 0x1b, 0xed, 0x85, 0xa5, 0x61, 0xa1, 0x37, 0x60, 0x32, 0xe4, 0x01, 0x5b, 0x63,
- 0x13, 0x19, 0xbe, 0x16, 0x59, 0xcc, 0xbb, 0x6a, 0x02, 0x86, 0x53, 0xd8, 0x68, 0x59, 0xb6, 0xca,
- 0x8c, 0xa1, 0xf8, 0xf2, 0x3c, 0x17, 0xb7, 0x28, 0x0c, 0xa2, 0x8e, 0x26, 0x27, 0x81, 0x0d, 0xbf,
- 0x56, 0x13, 0x7d, 0x0a, 0x80, 0x2e, 0x22, 0x21, 0xc2, 0x19, 0xcc, 0x3f, 0x42, 0xe9, 0xd9, 0x52,
- 0xcf, 0xf4, 0xc0, 0x60, 0xe1, 0x01, 0x16, 0x15, 0x11, 0xac, 0x11, 0x44, 0x0e, 0x8c, 0xc5, 0xff,
- 0xe2, 0xac, 0xf4, 0xe7, 0x73, 0x5b, 0x48, 0x12, 0x67, 0xea, 0x8d, 0x45, 0x9d, 0x04, 0x36, 0x29,
- 0xa2, 0x4f, 0xc2, 0xc9, 0xed, 0x5c, 0xbb, 0x23, 0xce, 0x4b, 0xb2, 0x34, 0xf3, 0xf9, 0xd6, 0x46,
- 0xf9, 0xf5, 0xed, 0xff, 0x09, 0xe0, 0x91, 0x0e, 0x27, 0x3d, 0x9a, 0x33, 0x6d, 0x06, 0x9e, 0x49,
- 0xca, 0x55, 0x66, 0x32, 0x2b, 0x1b, 0x82, 0x96, 0xc4, 0x86, 0x2a, 0xbc, 0xe7, 0x0d, 0xf5, 0x13,
- 0x96, 0xf6, 0xcc, 0xe2, 0x16, 0xe5, 0x1f, 0x3b, 0xe0, 0x0d, 0xf6, 0x00, 0x45, 0x60, 0xeb, 0x19,
- 0x72, 0xa4, 0xe7, 0x7b, 0xee, 0x4e, 0xef, 0x82, 0xa5, 0xdf, 0xc9, 0x0e, 0x71, 0xcf, 0x45, 0x4c,
- 0x97, 0x0f, 0xfa, 0xfd, 0x87, 0x15, 0xee, 0xfe, 0x1b, 0x16, 0x9c, 0x4c, 0x15, 0xf3, 0x3e, 0x90,
- 0x50, 0x44, 0xe9, 0x5b, 0x7d, 0xcf, 0x9d, 0x97, 0x04, 0xf9, 0x37, 0x5c, 0x11, 0xdf, 0x70, 0x32,
- 0x17, 0x2f, 0xd9, 0xf5, 0x2f, 0xfd, 0x7d, 0xe9, 0x08, 0x6b, 0xc0, 0x44, 0xc4, 0xf9, 0x5d, 0x47,
- 0x2d, 0x38, 0x5b, 0x6b, 0x07, 0x41, 0xbc, 0x58, 0x33, 0x36, 0x27, 0x7f, 0x2d, 0x3e, 0xbe, 0xb7,
- 0x5b, 0x3a, 0xbb, 0xd0, 0x05, 0x17, 0x77, 0xa5, 0x86, 0x3c, 0x40, 0xcd, 0x94, 0x75, 0x1f, 0x3b,
- 0x00, 0x72, 0xa4, 0x40, 0x69, 0x5b, 0x40, 0x6e, 0xa7, 0x9b, 0x61, 0x23, 0x98, 0x41, 0xf9, 0x70,
- 0x65, 0x37, 0xdf, 0x99, 0x78, 0xfa, 0x33, 0xd7, 0xe0, 0x4c, 0xe7, 0xc5, 0x74, 0xa0, 0x10, 0x14,
- 0x7f, 0x63, 0xc1, 0xe9, 0x8e, 0x71, 0xce, 0xbe, 0x0b, 0x1f, 0x0b, 0xf6, 0xe7, 0x2d, 0x78, 0x34,
- 0xb3, 0x46, 0xd2, 0x79, 0xb0, 0x46, 0x0b, 0x35, 0x63, 0xd8, 0x38, 0xe2, 0x8f, 0x04, 0xe0, 0x18,
- 0xc7, 0xb0, 0x17, 0x2d, 0x74, 0xb5, 0x17, 0xfd, 0x53, 0x0b, 0x52, 0x57, 0xfd, 0x21, 0x70, 0x9e,
- 0x65, 0x93, 0xf3, 0x7c, 0xbc, 0x97, 0xd1, 0xcc, 0x61, 0x3a, 0xff, 0x79, 0x02, 0x8e, 0xe7, 0x78,
- 0x90, 0x6f, 0xc3, 0xd4, 0x46, 0x8d, 0x98, 0x21, 0x43, 0x3a, 0x85, 0xd2, 0xeb, 0x18, 0x5f, 0x64,
- 0xfe, 0xd8, 0xde, 0x6e, 0x69, 0x2a, 0x85, 0x82, 0xd3, 0x4d, 0xa0, 0xcf, 0x5b, 0x70, 0xd4, 0xb9,
- 0x13, 0x2e, 0xd1, 0x17, 0x84, 0x5b, 0x9b, 0x6f, 0xf8, 0xb5, 0x2d, 0xca, 0x98, 0xc9, 0x6d, 0xf5,
- 0x62, 0xa6, 0x28, 0xfc, 0x56, 0x35, 0x85, 0x6f, 0x34, 0x3f, 0xbd, 0xb7, 0x5b, 0x3a, 0x9a, 0x85,
- 0x85, 0x33, 0xdb, 0x42, 0x58, 0xe4, 0x38, 0x73, 0xa2, 0xcd, 0x4e, 0x41, 0x6d, 0xb2, 0x5c, 0xfd,
- 0x39, 0x4b, 0x2c, 0x21, 0x58, 0xd1, 0x41, 0x9f, 0x81, 0xe1, 0x0d, 0x19, 0xbf, 0x22, 0x83, 0xe5,
- 0x8e, 0x07, 0xb2, 0x73, 0x54, 0x0f, 0x6e, 0x80, 0xa3, 0x90, 0x70, 0x4c, 0x14, 0xbd, 0x0e, 0x45,
- 0x6f, 0x3d, 0x14, 0xa1, 0xf5, 0xb2, 0xed, 0x80, 0x4d, 0x4b, 0x6b, 0x1e, 0x3a, 0x6a, 0x75, 0xb9,
- 0x8a, 0x69, 0x45, 0x74, 0x05, 0x8a, 0xc1, 0xed, 0xba, 0xd0, 0xe3, 0x64, 0x6e, 0x52, 0x3c, 0xbf,
- 0x98, 0xd3, 0x2b, 0x46, 0x09, 0xcf, 0x2f, 0x62, 0x4a, 0x02, 0x55, 0xa0, 0x9f, 0xb9, 0x5d, 0x0b,
- 0xd6, 0x36, 0xf3, 0x29, 0xdf, 0x21, 0x7c, 0x01, 0xf7, 0x87, 0x64, 0x08, 0x98, 0x13, 0x42, 0x6b,
- 0x30, 0x50, 0x73, 0xbd, 0x3a, 0x09, 0x04, 0x2f, 0xfb, 0xe1, 0x4c, 0x8d, 0x0d, 0xc3, 0xc8, 0xa1,
- 0xc9, 0x15, 0x18, 0x0c, 0x03, 0x0b, 0x5a, 0x8c, 0x2a, 0x69, 0x6d, 0xae, 0xcb, 0x1b, 0x2b, 0x9b,
- 0x2a, 0x69, 0x6d, 0x2e, 0x57, 0x3b, 0x52, 0x65, 0x18, 0x58, 0xd0, 0x42, 0xaf, 0x40, 0x61, 0xbd,
- 0x26, 0x5c, 0xaa, 0x33, 0xc5, 0x9b, 0x66, 0xf4, 0xaf, 0xf9, 0x81, 0xbd, 0xdd, 0x52, 0x61, 0x79,
- 0x01, 0x17, 0xd6, 0x6b, 0x68, 0x15, 0x06, 0xd7, 0x79, 0xbc, 0x20, 0x21, 0x1f, 0x7d, 0x32, 0x3b,
- 0x94, 0x51, 0x2a, 0xa4, 0x10, 0xf7, 0x6d, 0x15, 0x00, 0x2c, 0x89, 0xb0, 0x94, 0x5b, 0x2a, 0xee,
- 0x91, 0x08, 0xbb, 0x3a, 0x7b, 0xb0, 0x58, 0x55, 0xfc, 0xa9, 0x11, 0x47, 0x4f, 0xc2, 0x1a, 0x45,
- 0xba, 0xaa, 0x9d, 0x7b, 0xed, 0x80, 0xe5, 0xe4, 0x10, 0x8a, 0x99, 0xcc, 0x55, 0x3d, 0x27, 0x91,
- 0x3a, 0xad, 0x6a, 0x85, 0x84, 0x63, 0xa2, 0x68, 0x0b, 0xc6, 0xb6, 0xc3, 0xd6, 0x26, 0x91, 0x5b,
- 0x9a, 0x85, 0xeb, 0xcb, 0xe1, 0x66, 0x6f, 0x0a, 0x44, 0x37, 0x88, 0xda, 0x4e, 0x23, 0x75, 0x0a,
- 0xb1, 0x67, 0xcd, 0x4d, 0x9d, 0x18, 0x36, 0x69, 0xd3, 0xe1, 0x7f, 0xb7, 0xed, 0xdf, 0xde, 0x89,
- 0x88, 0x88, 0x96, 0x9a, 0x39, 0xfc, 0x6f, 0x71, 0x94, 0xf4, 0xf0, 0x0b, 0x00, 0x96, 0x44, 0xd0,
- 0x4d, 0x31, 0x3c, 0xec, 0xf4, 0x9c, 0xcc, 0x0f, 0xc5, 0x3e, 0x27, 0x91, 0x72, 0x06, 0x85, 0x9d,
- 0x96, 0x31, 0x29, 0x76, 0x4a, 0xb6, 0x36, 0xfd, 0xc8, 0xf7, 0x12, 0x27, 0xf4, 0x54, 0xfe, 0x29,
- 0x59, 0xc9, 0xc0, 0x4f, 0x9f, 0x92, 0x59, 0x58, 0x38, 0xb3, 0x2d, 0x54, 0x87, 0xf1, 0x96, 0x1f,
- 0x44, 0x77, 0xfc, 0x40, 0xae, 0x2f, 0xd4, 0x41, 0x50, 0x6a, 0x60, 0x8a, 0x16, 0x99, 0x59, 0x90,
- 0x09, 0xc1, 0x09, 0x9a, 0xe8, 0xe3, 0x30, 0x18, 0xd6, 0x9c, 0x06, 0x29, 0x5f, 0x9f, 0x3e, 0x92,
- 0x7f, 0xfd, 0x54, 0x39, 0x4a, 0xce, 0xea, 0xe2, 0xe1, 0x9e, 0x38, 0x0a, 0x96, 0xe4, 0xd0, 0x32,
- 0xf4, 0xb3, 0x54, 0xd6, 0x2c, 0xb4, 0x6f, 0x4e, 0x44, 0xf9, 0x94, 0x53, 0x0f, 0x3f, 0x9b, 0x58,
- 0x31, 0xe6, 0xd5, 0xe9, 0x1e, 0x10, 0x92, 0x02, 0x3f, 0x9c, 0x3e, 0x96, 0xbf, 0x07, 0x84, 0x80,
- 0xe1, 0x7a, 0xb5, 0xd3, 0x1e, 0x50, 0x48, 0x38, 0x26, 0x4a, 0x4f, 0x66, 0x7a, 0x9a, 0x1e, 0xef,
- 0x60, 0xb0, 0x99, 0x7b, 0x96, 0xb2, 0x93, 0x99, 0x9e, 0xa4, 0x94, 0x84, 0xfd, 0xc7, 0x43, 0x69,
- 0x9e, 0x85, 0x49, 0x98, 0xfe, 0x63, 0x2b, 0x65, 0xb1, 0xf1, 0x91, 0x5e, 0x05, 0xde, 0x0f, 0xf0,
- 0xe1, 0xfa, 0x79, 0x0b, 0x8e, 0xb7, 0x32, 0x3f, 0x44, 0x30, 0x00, 0xbd, 0xc9, 0xcd, 0xf9, 0xa7,
- 0xab, 0x30, 0xd0, 0xd9, 0x70, 0x9c, 0xd3, 0x52, 0x52, 0x38, 0x50, 0x7c, 0xcf, 0xc2, 0x81, 0x15,
- 0x18, 0xaa, 0xf1, 0x97, 0x9c, 0x4c, 0x5f, 0xd0, 0x53, 0x10, 0x53, 0xae, 0xa7, 0x15, 0x15, 0xb1,
- 0x22, 0x81, 0x7e, 0xd2, 0x82, 0xd3, 0xc9, 0xae, 0x63, 0xc2, 0xc0, 0xc2, 0x5c, 0x93, 0x8b, 0xb5,
- 0x96, 0xc5, 0xf7, 0xa7, 0xf8, 0x7f, 0x03, 0x79, 0xbf, 0x1b, 0x02, 0xee, 0xdc, 0x18, 0x5a, 0xcc,
- 0x90, 0xab, 0x0d, 0x98, 0x3a, 0xc9, 0x1e, 0x64, 0x6b, 0x2f, 0xc2, 0x68, 0xd3, 0x6f, 0x7b, 0x91,
- 0xb0, 0xba, 0x14, 0xa6, 0x5b, 0xcc, 0x64, 0x69, 0x45, 0x2b, 0xc7, 0x06, 0x56, 0x42, 0x22, 0x37,
- 0x74, 0xdf, 0x12, 0xb9, 0x77, 0x60, 0xd4, 0xd3, 0x1c, 0x12, 0x3a, 0xbd, 0x60, 0x85, 0x74, 0x51,
- 0xc3, 0xe6, 0xbd, 0xd4, 0x4b, 0xb0, 0x41, 0xad, 0xb3, 0xb4, 0x0c, 0xde, 0x9b, 0xb4, 0xec, 0x50,
- 0x9f, 0xc4, 0xf6, 0x6f, 0x16, 0x32, 0x5e, 0x0c, 0x5c, 0x2a, 0xf7, 0x9a, 0x29, 0x95, 0x3b, 0x97,
- 0x94, 0xca, 0xa5, 0x54, 0x55, 0x86, 0x40, 0xae, 0xf7, 0x1c, 0x9a, 0x3d, 0x07, 0xa6, 0xfe, 0x61,
- 0x0b, 0x4e, 0x30, 0xdd, 0x07, 0x6d, 0xe0, 0x3d, 0xeb, 0x3b, 0x98, 0x41, 0xec, 0xb5, 0x6c, 0x72,
- 0x38, 0xaf, 0x1d, 0xbb, 0x01, 0x67, 0xbb, 0xdd, 0xbb, 0xcc, 0xbe, 0xb8, 0xae, 0xcc, 0x2b, 0x62,
- 0xfb, 0xe2, 0x7a, 0x79, 0x11, 0x33, 0x48, 0xaf, 0x61, 0x17, 0xed, 0xff, 0xdb, 0x82, 0x62, 0xc5,
- 0xaf, 0x1f, 0xc2, 0x8b, 0xfe, 0x63, 0xc6, 0x8b, 0xfe, 0x91, 0xec, 0x1b, 0xbf, 0x9e, 0xab, 0xec,
- 0x5b, 0x4a, 0x28, 0xfb, 0x4e, 0xe7, 0x11, 0xe8, 0xac, 0xda, 0xfb, 0xe5, 0x22, 0x8c, 0x54, 0xfc,
- 0xba, 0xda, 0x67, 0xff, 0xfd, 0xfd, 0xb8, 0x11, 0xe5, 0x66, 0xcd, 0xd2, 0x28, 0x33, 0x7b, 0x62,
- 0x19, 0xf5, 0xe2, 0xbb, 0xcc, 0x9b, 0xe8, 0x16, 0x71, 0x37, 0x36, 0x23, 0x52, 0x4f, 0x7e, 0xce,
- 0xe1, 0x79, 0x13, 0x7d, 0xab, 0x08, 0x13, 0x89, 0xd6, 0x51, 0x03, 0xc6, 0x1a, 0xba, 0x2a, 0x49,
- 0xac, 0xd3, 0xfb, 0xd2, 0x42, 0x09, 0x6f, 0x0c, 0xad, 0x08, 0x9b, 0xc4, 0xd1, 0x2c, 0x80, 0xa7,
- 0xdb, 0xa4, 0xab, 0x00, 0xcb, 0x9a, 0x3d, 0xba, 0x86, 0x81, 0x5e, 0x82, 0x91, 0xc8, 0x6f, 0xf9,
- 0x0d, 0x7f, 0x63, 0xe7, 0x2a, 0x91, 0x11, 0x39, 0x95, 0xc9, 0xf2, 0x5a, 0x0c, 0xc2, 0x3a, 0x1e,
- 0xba, 0x0b, 0x53, 0x8a, 0x48, 0xf5, 0x01, 0xa8, 0xd7, 0x98, 0xd8, 0x64, 0x35, 0x49, 0x11, 0xa7,
- 0x1b, 0x41, 0xaf, 0xc0, 0x38, 0xb3, 0x9d, 0x66, 0xf5, 0xaf, 0x92, 0x1d, 0x19, 0xa9, 0x99, 0x71,
- 0xd8, 0x2b, 0x06, 0x04, 0x27, 0x30, 0xd1, 0x02, 0x4c, 0x35, 0xdd, 0x30, 0x51, 0x7d, 0x80, 0x55,
- 0x67, 0x1d, 0x58, 0x49, 0x02, 0x71, 0x1a, 0xdf, 0xfe, 0x75, 0x31, 0xc7, 0x5e, 0xe4, 0x7e, 0xb0,
- 0x1d, 0xdf, 0xdf, 0xdb, 0xf1, 0x9b, 0x16, 0x4c, 0xd2, 0xd6, 0x99, 0x41, 0xa8, 0x64, 0xa4, 0x54,
- 0x2e, 0x0f, 0xab, 0x43, 0x2e, 0x8f, 0x73, 0xf4, 0xd8, 0xae, 0xfb, 0xed, 0x48, 0x48, 0x47, 0xb5,
- 0x73, 0x99, 0x96, 0x62, 0x01, 0x15, 0x78, 0x24, 0x08, 0x84, 0xd7, 0xbd, 0x8e, 0x47, 0x82, 0x00,
- 0x0b, 0xa8, 0x4c, 0xf5, 0xd1, 0x97, 0x9d, 0xea, 0x83, 0x47, 0x6c, 0x17, 0x76, 0x74, 0x82, 0xa5,
- 0xd5, 0x22, 0xb6, 0x4b, 0x03, 0xbb, 0x18, 0xc7, 0xfe, 0x76, 0x11, 0x46, 0x2b, 0x7e, 0x3d, 0x36,
- 0xec, 0x78, 0xd1, 0x30, 0xec, 0x38, 0x9b, 0x30, 0xec, 0x98, 0xd4, 0x71, 0x35, 0x33, 0x8e, 0x37,
- 0x01, 0xf9, 0x22, 0x90, 0xfc, 0x65, 0xe2, 0x31, 0xbb, 0x37, 0x61, 0xa8, 0x57, 0x8c, 0xcd, 0x1e,
- 0xae, 0xa7, 0x30, 0x70, 0x46, 0xad, 0x0f, 0x4c, 0x42, 0x0e, 0xd7, 0x24, 0xe4, 0x4f, 0x2c, 0xb6,
- 0x02, 0x16, 0x57, 0xab, 0xdc, 0x56, 0x19, 0x5d, 0x84, 0x11, 0x76, 0x5a, 0xb2, 0x90, 0x11, 0xd2,
- 0x72, 0x82, 0xa5, 0xf1, 0x5c, 0x8d, 0x8b, 0xb1, 0x8e, 0x83, 0xce, 0xc3, 0x50, 0x48, 0x9c, 0xa0,
- 0xb6, 0xa9, 0xae, 0x0a, 0x61, 0xe6, 0xc0, 0xcb, 0xb0, 0x82, 0xa2, 0xb7, 0xe2, 0xc0, 0xe3, 0xc5,
- 0x7c, 0xc3, 0x67, 0xbd, 0x3f, 0x7c, 0xbb, 0xe5, 0x47, 0x1b, 0xb7, 0x6f, 0x01, 0x4a, 0xe3, 0xf7,
- 0xe0, 0x49, 0x56, 0x32, 0x43, 0xe3, 0x0e, 0xa7, 0xc2, 0xe2, 0xfe, 0x9b, 0x05, 0xe3, 0x15, 0xbf,
- 0x4e, 0x8f, 0x81, 0xef, 0xa5, 0x3d, 0xaf, 0x67, 0x5d, 0x18, 0xe8, 0x90, 0x75, 0xe1, 0x31, 0xe8,
- 0xaf, 0xf8, 0xf5, 0x2e, 0xe1, 0x7b, 0x7f, 0xc5, 0x82, 0xc1, 0x8a, 0x5f, 0x3f, 0x04, 0x25, 0xce,
- 0x6b, 0xa6, 0x12, 0xe7, 0x44, 0xce, 0xba, 0xc9, 0xd1, 0xdb, 0xfc, 0x79, 0x1f, 0x8c, 0xd1, 0x7e,
- 0xfa, 0x1b, 0x72, 0x2a, 0x8d, 0x61, 0xb3, 0x7a, 0x18, 0x36, 0xfa, 0xa4, 0xf0, 0x1b, 0x0d, 0xff,
- 0x4e, 0x72, 0x5a, 0x97, 0x59, 0x29, 0x16, 0x50, 0xf4, 0x2c, 0x0c, 0xb5, 0x02, 0xb2, 0xed, 0xfa,
- 0x82, 0x57, 0xd7, 0x54, 0x62, 0x15, 0x51, 0x8e, 0x15, 0x06, 0x7d, 0xc4, 0x87, 0xae, 0x47, 0xf9,
- 0x92, 0x9a, 0xef, 0xd5, 0xb9, 0x9e, 0xa3, 0x28, 0x52, 0x83, 0x69, 0xe5, 0xd8, 0xc0, 0x42, 0xb7,
- 0x60, 0x98, 0xfd, 0x67, 0xc7, 0x4e, 0xff, 0x81, 0x8f, 0x1d, 0x91, 0x2c, 0x59, 0x10, 0xc0, 0x31,
- 0x2d, 0xf4, 0x3c, 0x40, 0x24, 0xd3, 0xeb, 0x84, 0x22, 0x8c, 0xab, 0x7a, 0xd7, 0xa8, 0xc4, 0x3b,
- 0x21, 0xd6, 0xb0, 0xd0, 0x33, 0x30, 0x1c, 0x39, 0x6e, 0xe3, 0x9a, 0xeb, 0x31, 0x5b, 0x00, 0xda,
- 0x7f, 0x91, 0xb3, 0x58, 0x14, 0xe2, 0x18, 0x4e, 0xf9, 0x4a, 0x16, 0xdd, 0x6a, 0x7e, 0x27, 0x12,
- 0xe9, 0xf9, 0x8a, 0x9c, 0xaf, 0xbc, 0xa6, 0x4a, 0xb1, 0x86, 0x81, 0x36, 0xe1, 0x94, 0xeb, 0xb1,
- 0x34, 0x5a, 0xa4, 0xba, 0xe5, 0xb6, 0xd6, 0xae, 0x55, 0x6f, 0x92, 0xc0, 0x5d, 0xdf, 0x99, 0x77,
- 0x6a, 0x5b, 0xc4, 0xab, 0x33, 0xb1, 0xc3, 0xd0, 0xfc, 0xe3, 0xa2, 0x8b, 0xa7, 0xca, 0x1d, 0x70,
- 0x71, 0x47, 0x4a, 0xc8, 0xa6, 0xdb, 0x31, 0x20, 0x4e, 0x53, 0xc8, 0x17, 0x78, 0x0a, 0x1e, 0x56,
- 0x82, 0x05, 0xc4, 0x7e, 0x81, 0xed, 0x89, 0xeb, 0x55, 0xf4, 0xb4, 0x71, 0xbc, 0x1c, 0xd7, 0x8f,
- 0x97, 0xfd, 0xdd, 0xd2, 0xc0, 0xf5, 0xaa, 0x16, 0xe9, 0xe8, 0x12, 0x1c, 0xab, 0xf8, 0xf5, 0x8a,
- 0x1f, 0x44, 0xcb, 0x7e, 0x70, 0xc7, 0x09, 0xea, 0x72, 0x09, 0x96, 0x64, 0xac, 0x27, 0x7a, 0xc6,
- 0xf6, 0xf3, 0x13, 0xc8, 0x88, 0xe3, 0xf4, 0x02, 0xe3, 0x10, 0x0f, 0xe8, 0x5a, 0x5b, 0x63, 0xbc,
- 0x8a, 0x4a, 0x56, 0x77, 0xd9, 0x89, 0x08, 0xba, 0x0e, 0x63, 0x35, 0xfd, 0xda, 0x16, 0xd5, 0x9f,
- 0x92, 0x97, 0x9d, 0x71, 0xa7, 0x67, 0xde, 0xf3, 0x66, 0x7d, 0xfb, 0x1b, 0x96, 0x68, 0x85, 0x4b,
- 0x3e, 0xb8, 0x0d, 0x6d, 0xf7, 0x33, 0x77, 0x01, 0xa6, 0x02, 0xbd, 0x8a, 0x66, 0x8b, 0x76, 0x8c,
- 0x67, 0xff, 0x49, 0x00, 0x71, 0x1a, 0x1f, 0x7d, 0x12, 0x4e, 0x1a, 0x85, 0x52, 0x2d, 0xaf, 0xe5,
- 0xe0, 0x66, 0xb2, 0x21, 0x9c, 0x87, 0x84, 0xf3, 0xeb, 0xdb, 0x3f, 0x08, 0xc7, 0x93, 0xdf, 0x25,
- 0xa4, 0x35, 0xf7, 0xf9, 0x75, 0x85, 0x83, 0x7d, 0x9d, 0xfd, 0x12, 0x4c, 0xd1, 0x67, 0xbc, 0x62,
- 0x49, 0xd9, 0xfc, 0x75, 0x0f, 0xa7, 0xf5, 0xdb, 0x43, 0xec, 0x1a, 0x4c, 0x64, 0xa0, 0x43, 0x9f,
- 0x86, 0xf1, 0x90, 0xb0, 0x18, 0x72, 0x52, 0x4a, 0xd8, 0xc1, 0x2f, 0xbe, 0xba, 0xa4, 0x63, 0xf2,
- 0x97, 0x90, 0x59, 0x86, 0x13, 0xd4, 0x50, 0x13, 0xc6, 0xef, 0xb8, 0x5e, 0xdd, 0xbf, 0x13, 0x4a,
- 0xfa, 0x43, 0xf9, 0x2a, 0x87, 0x5b, 0x1c, 0x33, 0xd1, 0x47, 0xa3, 0xb9, 0x5b, 0x06, 0x31, 0x9c,
- 0x20, 0x4e, 0x8f, 0x9a, 0xa0, 0xed, 0xcd, 0x85, 0x37, 0x42, 0x12, 0x88, 0x08, 0x77, 0xec, 0xa8,
- 0xc1, 0xb2, 0x10, 0xc7, 0x70, 0x7a, 0xd4, 0xb0, 0x3f, 0xcc, 0xb1, 0x9e, 0x9d, 0x65, 0xe2, 0xa8,
- 0xc1, 0xaa, 0x14, 0x6b, 0x18, 0xf4, 0x28, 0x66, 0xff, 0x56, 0x7d, 0x0f, 0xfb, 0x7e, 0x24, 0x0f,
- 0x6f, 0x96, 0xae, 0x53, 0x2b, 0xc7, 0x06, 0x56, 0x4e, 0x3c, 0xbd, 0xbe, 0x83, 0xc6, 0xd3, 0x43,
- 0x51, 0x87, 0x58, 0x02, 0x3c, 0x22, 0xf4, 0xa5, 0x4e, 0xb1, 0x04, 0xf6, 0xef, 0x2b, 0xce, 0x00,
- 0xe5, 0x05, 0xd6, 0xc5, 0x00, 0xf5, 0xf3, 0x80, 0x81, 0x4c, 0x29, 0x5a, 0xe5, 0xa3, 0x23, 0x61,
- 0x68, 0x09, 0x06, 0xc3, 0x9d, 0xb0, 0x16, 0x35, 0xc2, 0x4e, 0x29, 0x59, 0xab, 0x0c, 0x45, 0xcb,
- 0x08, 0xce, 0xab, 0x60, 0x59, 0x17, 0xd5, 0xe0, 0x88, 0xa0, 0xb8, 0xb0, 0xe9, 0x78, 0x2a, 0x51,
- 0x24, 0xb7, 0x7e, 0xbc, 0xb8, 0xb7, 0x5b, 0x3a, 0x22, 0x5a, 0xd6, 0xc1, 0xfb, 0xbb, 0x25, 0xba,
- 0x25, 0x33, 0x20, 0x38, 0x8b, 0x1a, 0x5f, 0xf2, 0xb5, 0x9a, 0xdf, 0x6c, 0x55, 0x02, 0x7f, 0xdd,
- 0x6d, 0x90, 0x4e, 0x8a, 0xe5, 0xaa, 0x81, 0x29, 0x96, 0xbc, 0x51, 0x86, 0x13, 0xd4, 0xd0, 0x6d,
- 0x98, 0x70, 0x5a, 0xad, 0xb9, 0xa0, 0xe9, 0x07, 0xb2, 0x81, 0x91, 0x7c, 0x0d, 0xc5, 0x9c, 0x89,
- 0xca, 0xf3, 0x44, 0x26, 0x0a, 0x71, 0x92, 0x20, 0x1d, 0x28, 0xb1, 0xd1, 0x8c, 0x81, 0x1a, 0x8b,
- 0x07, 0x4a, 0xec, 0xcb, 0x8c, 0x81, 0xca, 0x80, 0xe0, 0x2c, 0x6a, 0xf6, 0x0f, 0x30, 0xc6, 0x9f,
- 0xc5, 0x9b, 0x66, 0x6e, 0x46, 0x4d, 0x18, 0x6b, 0xb1, 0x63, 0x5f, 0xe4, 0x70, 0x13, 0x47, 0xc5,
- 0x8b, 0x3d, 0x0a, 0x42, 0xef, 0xb0, 0x2c, 0xb4, 0x86, 0x41, 0x6c, 0x45, 0x27, 0x87, 0x4d, 0xea,
- 0xf6, 0x2f, 0xcd, 0x30, 0xd6, 0xb1, 0xca, 0xa5, 0x9b, 0x83, 0xc2, 0xe9, 0x52, 0xc8, 0x33, 0x66,
- 0xf2, 0xf5, 0x08, 0xf1, 0xfa, 0x12, 0x8e, 0x9b, 0x58, 0xd6, 0x45, 0x9f, 0x82, 0x71, 0xd7, 0x73,
- 0xe3, 0xec, 0xcd, 0xe1, 0xf4, 0xd1, 0xfc, 0x68, 0x5e, 0x0a, 0x4b, 0xcf, 0xef, 0xa8, 0x57, 0xc6,
- 0x09, 0x62, 0xe8, 0x2d, 0x66, 0x23, 0x2a, 0x49, 0x17, 0x7a, 0x21, 0xad, 0x9b, 0x83, 0x4a, 0xb2,
- 0x1a, 0x11, 0xd4, 0x86, 0x23, 0xe9, 0x2c, 0xd6, 0xe1, 0xb4, 0x9d, 0xff, 0x36, 0x4a, 0x27, 0xa2,
- 0x8e, 0x13, 0xf1, 0xa5, 0x61, 0x21, 0xce, 0xa2, 0x8f, 0xae, 0x25, 0x73, 0x0c, 0x17, 0x0d, 0x0d,
- 0x44, 0x2a, 0xcf, 0xf0, 0x58, 0xc7, 0xf4, 0xc2, 0x1b, 0x70, 0x5a, 0x4b, 0xd3, 0x7a, 0x39, 0x70,
- 0x98, 0x8d, 0x92, 0xcb, 0x6e, 0x23, 0x8d, 0xa9, 0x7d, 0x74, 0x6f, 0xb7, 0x74, 0x7a, 0xad, 0x13,
- 0x22, 0xee, 0x4c, 0x07, 0x5d, 0x87, 0x63, 0x3c, 0x16, 0xcd, 0x22, 0x71, 0xea, 0x0d, 0xd7, 0x53,
- 0x5c, 0x33, 0x3f, 0xbb, 0x4e, 0xee, 0xed, 0x96, 0x8e, 0xcd, 0x65, 0x21, 0xe0, 0xec, 0x7a, 0xe8,
- 0x35, 0x18, 0xae, 0x7b, 0xf2, 0x94, 0x1d, 0x30, 0x32, 0xe1, 0x0e, 0x2f, 0xae, 0x56, 0xd5, 0xf7,
- 0xc7, 0x7f, 0x70, 0x5c, 0x01, 0x6d, 0x70, 0x15, 0x98, 0x92, 0x5b, 0x0e, 0xa6, 0x42, 0x94, 0x26,
- 0x45, 0xfb, 0x46, 0x70, 0x07, 0xae, 0xfb, 0x55, 0x0e, 0x80, 0x46, 0xdc, 0x07, 0x83, 0x30, 0x7a,
- 0x13, 0x90, 0xc8, 0xb8, 0x34, 0x57, 0x63, 0x09, 0x02, 0x35, 0xbb, 0x54, 0x25, 0x42, 0xa8, 0xa6,
- 0x30, 0x70, 0x46, 0x2d, 0x74, 0x85, 0x1e, 0x8f, 0x7a, 0xa9, 0x38, 0x7e, 0x55, 0xbe, 0xf5, 0x45,
- 0xd2, 0x0a, 0x08, 0x33, 0xa5, 0x34, 0x29, 0xe2, 0x44, 0x3d, 0x54, 0x87, 0x53, 0x4e, 0x3b, 0xf2,
- 0x99, 0x76, 0xd1, 0x44, 0x5d, 0xf3, 0xb7, 0x88, 0xc7, 0x14, 0xfb, 0x43, 0x2c, 0xf4, 0xe9, 0xa9,
- 0xb9, 0x0e, 0x78, 0xb8, 0x23, 0x15, 0xfa, 0x9c, 0xa2, 0x63, 0xa1, 0x29, 0xfe, 0x0c, 0x3f, 0x75,
- 0xae, 0x0d, 0x97, 0x18, 0xe8, 0x25, 0x18, 0xd9, 0xf4, 0xc3, 0x68, 0x95, 0x44, 0x77, 0xfc, 0x60,
- 0x4b, 0xa4, 0x78, 0x88, 0xd3, 0xea, 0xc4, 0x20, 0xac, 0xe3, 0xa1, 0xa7, 0x60, 0x90, 0x99, 0x9d,
- 0x95, 0x17, 0xd9, 0x5d, 0x3b, 0x14, 0x9f, 0x31, 0x57, 0x78, 0x31, 0x96, 0x70, 0x89, 0x5a, 0xae,
- 0x2c, 0xb0, 0xe3, 0x38, 0x81, 0x5a, 0xae, 0x2c, 0x60, 0x09, 0xa7, 0xcb, 0x35, 0xdc, 0x74, 0x02,
- 0x52, 0x09, 0xfc, 0x1a, 0x09, 0xb5, 0x64, 0x4e, 0x8f, 0xf0, 0x04, 0x16, 0x74, 0xb9, 0x56, 0xb3,
- 0x10, 0x70, 0x76, 0x3d, 0x44, 0xd2, 0x29, 0x8a, 0xc7, 0xf3, 0xd5, 0xae, 0x69, 0x76, 0xb0, 0xc7,
- 0x2c, 0xc5, 0x1e, 0x4c, 0xaa, 0xe4, 0xc8, 0x3c, 0x65, 0x45, 0x38, 0x3d, 0xc1, 0xd6, 0x76, 0xef,
- 0xf9, 0x2e, 0x94, 0x22, 0xbb, 0x9c, 0xa0, 0x84, 0x53, 0xb4, 0x8d, 0xd8, 0xba, 0x93, 0x5d, 0x63,
- 0xeb, 0x5e, 0x80, 0xe1, 0xb0, 0x7d, 0xbb, 0xee, 0x37, 0x1d, 0xd7, 0x63, 0xd6, 0x3b, 0xda, 0xc3,
- 0xbd, 0x2a, 0x01, 0x38, 0xc6, 0x41, 0xcb, 0x30, 0xe4, 0x48, 0x2d, 0x35, 0xca, 0x0f, 0x1b, 0xa8,
- 0x74, 0xd3, 0x3c, 0x92, 0x96, 0xd4, 0x4b, 0xab, 0xba, 0xe8, 0x55, 0x18, 0x13, 0xa1, 0x49, 0x78,
- 0x14, 0x1e, 0x66, 0x5d, 0xa3, 0x39, 0x53, 0x57, 0x75, 0x20, 0x36, 0x71, 0xd1, 0x0d, 0x18, 0x89,
- 0xfc, 0x86, 0x90, 0x71, 0x86, 0xd3, 0xc7, 0xf3, 0xa3, 0xfb, 0xae, 0x29, 0x34, 0x5d, 0x7f, 0xa2,
- 0xaa, 0x62, 0x9d, 0x0e, 0x5a, 0xe3, 0xeb, 0x9d, 0xa5, 0x6e, 0x22, 0xa1, 0x48, 0x48, 0x7f, 0x3a,
- 0xcf, 0xf4, 0x92, 0xa1, 0x99, 0xdb, 0x41, 0xd4, 0xc4, 0x3a, 0x19, 0x74, 0x19, 0xa6, 0x5a, 0x81,
- 0xeb, 0xb3, 0x35, 0xa1, 0xb4, 0xee, 0xd3, 0x66, 0xa2, 0xd6, 0x4a, 0x12, 0x01, 0xa7, 0xeb, 0xb0,
- 0xc8, 0x32, 0xa2, 0x70, 0xfa, 0x24, 0x4f, 0x36, 0xc7, 0xe5, 0x20, 0xbc, 0x0c, 0x2b, 0x28, 0x5a,
- 0x61, 0x27, 0x31, 0x17, 0xe1, 0x4d, 0xcf, 0xe4, 0xc7, 0x2b, 0xd0, 0x45, 0x7d, 0x9c, 0xf7, 0x57,
- 0x7f, 0x71, 0x4c, 0x01, 0xd5, 0xb5, 0x1c, 0xef, 0xf4, 0x05, 0x15, 0x4e, 0x9f, 0xea, 0x60, 0xfb,
- 0x9b, 0x78, 0x2e, 0xc7, 0x0c, 0x81, 0x51, 0x1c, 0xe2, 0x04, 0x4d, 0xf4, 0x06, 0x4c, 0x8a, 0xb0,
- 0x0b, 0xf1, 0x30, 0x9d, 0x8e, 0xfd, 0xa3, 0x70, 0x02, 0x86, 0x53, 0xd8, 0x3c, 0xd9, 0x9b, 0x73,
- 0xbb, 0x41, 0xc4, 0xd1, 0x77, 0xcd, 0xf5, 0xb6, 0xc2, 0xe9, 0x33, 0xec, 0x7c, 0x10, 0xc9, 0xde,
- 0x92, 0x50, 0x9c, 0x51, 0x03, 0xad, 0xc1, 0x64, 0x2b, 0x20, 0xa4, 0xc9, 0xde, 0x49, 0xe2, 0x3e,
- 0x2b, 0xf1, 0xc0, 0x4a, 0xb4, 0x27, 0x95, 0x04, 0x6c, 0x3f, 0xa3, 0x0c, 0xa7, 0x28, 0xa0, 0x3b,
- 0x30, 0xe4, 0x6f, 0x93, 0x60, 0x93, 0x38, 0xf5, 0xe9, 0xb3, 0x1d, 0xbc, 0xf6, 0xc4, 0xe5, 0x76,
- 0x5d, 0xe0, 0x26, 0x8c, 0x9a, 0x64, 0x71, 0x77, 0xa3, 0x26, 0xd9, 0x18, 0xfa, 0x4f, 0x2c, 0x38,
- 0x29, 0xd5, 0x84, 0xd5, 0x16, 0x1d, 0xf5, 0x05, 0xdf, 0x0b, 0xa3, 0x80, 0x87, 0x02, 0x7a, 0x34,
- 0x3f, 0x3c, 0xce, 0x5a, 0x4e, 0x25, 0xa5, 0x45, 0x38, 0x99, 0x87, 0x11, 0xe2, 0xfc, 0x16, 0xe9,
- 0xcb, 0x3e, 0x24, 0x91, 0x3c, 0x8c, 0xe6, 0xc2, 0xe5, 0xb7, 0x16, 0x57, 0xa7, 0x1f, 0xe3, 0x71,
- 0x8c, 0xe8, 0x66, 0xa8, 0x26, 0x81, 0x38, 0x8d, 0x8f, 0x2e, 0x42, 0xc1, 0x0f, 0xa7, 0x1f, 0x67,
- 0x6b, 0xfb, 0x64, 0xce, 0x38, 0x5e, 0xaf, 0x72, 0xe3, 0xd6, 0xeb, 0x55, 0x5c, 0xf0, 0x43, 0x99,
- 0x70, 0x8d, 0x3e, 0x67, 0xc3, 0xe9, 0x27, 0xb8, 0xcc, 0x59, 0x26, 0x5c, 0x63, 0x85, 0x38, 0x86,
- 0xa3, 0x4d, 0x98, 0x08, 0x0d, 0xb1, 0x41, 0x38, 0x7d, 0x8e, 0x8d, 0xd4, 0x13, 0x79, 0x93, 0x66,
- 0x60, 0x6b, 0x99, 0x90, 0x4c, 0x2a, 0x38, 0x49, 0x96, 0xef, 0x2e, 0x4d, 0x70, 0x11, 0x4e, 0x3f,
- 0xd9, 0x65, 0x77, 0x69, 0xc8, 0xfa, 0xee, 0xd2, 0x69, 0xe0, 0x04, 0x4d, 0x74, 0x43, 0x77, 0x89,
- 0x3c, 0x9f, 0x6f, 0x28, 0x99, 0xe9, 0x0c, 0x39, 0x96, 0xe7, 0x08, 0x39, 0xf3, 0x7d, 0x30, 0x95,
- 0xe2, 0xc2, 0x0e, 0xe2, 0x1f, 0x32, 0xb3, 0x05, 0x63, 0xc6, 0x4a, 0x7f, 0xa8, 0xe6, 0x43, 0x3f,
- 0x03, 0x30, 0xac, 0xcc, 0x3a, 0x72, 0xf4, 0x6c, 0x53, 0xf7, 0xa5, 0x67, 0xbb, 0x60, 0x5a, 0x1f,
- 0x9d, 0x4c, 0x5a, 0x1f, 0x0d, 0x55, 0xfc, 0xba, 0x61, 0x70, 0xb4, 0x96, 0x11, 0x41, 0x38, 0xef,
- 0x8c, 0xee, 0xdd, 0x21, 0x4e, 0x53, 0x55, 0x15, 0x7b, 0x36, 0x63, 0xea, 0xeb, 0xa8, 0xfd, 0xba,
- 0x0c, 0x53, 0x9e, 0xcf, 0x9e, 0x11, 0xa4, 0x2e, 0x79, 0x44, 0xc6, 0x0a, 0x0e, 0xeb, 0x11, 0xee,
- 0x12, 0x08, 0x38, 0x5d, 0x87, 0x36, 0xc8, 0x79, 0xb9, 0xa4, 0xba, 0x8d, 0xb3, 0x7a, 0x58, 0x40,
- 0xe9, 0xf3, 0x95, 0xff, 0x0a, 0xa7, 0x27, 0xf3, 0x9f, 0xaf, 0xbc, 0x52, 0x92, 0x5f, 0x0c, 0x25,
- 0xbf, 0xc8, 0xb4, 0x4b, 0x2d, 0xbf, 0x5e, 0xae, 0x88, 0x97, 0x88, 0x16, 0xdb, 0xbf, 0x5e, 0xae,
- 0x60, 0x0e, 0x43, 0x73, 0x30, 0xc0, 0x7e, 0xc8, 0xc8, 0x41, 0x79, 0x27, 0x49, 0xb9, 0xa2, 0xe5,
- 0xa4, 0x65, 0x15, 0xb0, 0xa8, 0xc8, 0xb4, 0x07, 0xf4, 0xf9, 0xc6, 0xb4, 0x07, 0x83, 0xf7, 0xa9,
- 0x3d, 0x90, 0x04, 0x70, 0x4c, 0x0b, 0xdd, 0x85, 0x63, 0xc6, 0x93, 0x59, 0x79, 0x08, 0x42, 0xbe,
- 0x91, 0x42, 0x02, 0x79, 0xfe, 0xb4, 0xe8, 0xf4, 0xb1, 0x72, 0x16, 0x25, 0x9c, 0xdd, 0x00, 0x6a,
- 0xc0, 0x54, 0x2d, 0xd5, 0xea, 0x50, 0xef, 0xad, 0xaa, 0x75, 0x91, 0x6e, 0x31, 0x4d, 0x18, 0xbd,
- 0x0a, 0x43, 0xef, 0xfa, 0xdc, 0xa0, 0x50, 0xbc, 0x9e, 0x64, 0x7c, 0x9b, 0xa1, 0xb7, 0xae, 0x57,
- 0x59, 0xf9, 0xfe, 0x6e, 0x69, 0xa4, 0xe2, 0xd7, 0xe5, 0x5f, 0xac, 0x2a, 0xa0, 0x1f, 0xb3, 0x60,
- 0x26, 0xfd, 0x26, 0x57, 0x9d, 0x1e, 0xeb, 0xbd, 0xd3, 0xb6, 0x68, 0x74, 0x66, 0x29, 0x97, 0x1c,
- 0xee, 0xd0, 0x14, 0xfa, 0x28, 0xdd, 0x4f, 0xa1, 0x7b, 0x8f, 0x88, 0x84, 0xfe, 0x8f, 0xc6, 0xfb,
- 0x89, 0x96, 0xee, 0xef, 0x96, 0x26, 0xf8, 0xe1, 0xed, 0xde, 0x53, 0x59, 0x08, 0x78, 0x05, 0xf4,
- 0x83, 0x70, 0x2c, 0x48, 0xcb, 0xc8, 0x89, 0x7c, 0x27, 0x3c, 0xdd, 0xcb, 0x45, 0x90, 0x9c, 0x70,
- 0x9c, 0x45, 0x10, 0x67, 0xb7, 0x63, 0xff, 0xa1, 0xc5, 0x74, 0x23, 0xa2, 0x5b, 0x24, 0x6c, 0x37,
- 0xa2, 0x43, 0x30, 0xe2, 0x5b, 0x32, 0x6c, 0x13, 0xee, 0xdb, 0x0a, 0xef, 0xbf, 0xb3, 0x98, 0x15,
- 0xde, 0x21, 0xfa, 0x13, 0xbe, 0x05, 0x43, 0x91, 0x68, 0x4d, 0x74, 0x3d, 0xcf, 0x62, 0x48, 0x76,
- 0x8a, 0x59, 0x22, 0xaa, 0x77, 0x98, 0x2c, 0xc5, 0x8a, 0x8c, 0xfd, 0x5f, 0xf3, 0x19, 0x90, 0x90,
- 0x43, 0x50, 0x01, 0x2f, 0x9a, 0x2a, 0xe0, 0x52, 0x97, 0x2f, 0xc8, 0x51, 0x05, 0xff, 0x57, 0x66,
- 0xbf, 0x99, 0xfc, 0xf1, 0xfd, 0x6e, 0xfe, 0x69, 0x7f, 0xd1, 0x02, 0x88, 0xd3, 0xbe, 0xf4, 0x90,
- 0xc0, 0xfb, 0x12, 0x7d, 0x79, 0xf9, 0x91, 0x5f, 0xf3, 0x1b, 0x42, 0x05, 0x75, 0x2a, 0xd6, 0x42,
- 0xf3, 0xf2, 0x7d, 0xed, 0x37, 0x56, 0xd8, 0xa8, 0x24, 0xe3, 0x30, 0x17, 0x63, 0xbb, 0x08, 0x23,
- 0x06, 0xf3, 0x57, 0x2c, 0x38, 0x9a, 0xe5, 0x9c, 0x42, 0xdf, 0xf1, 0x5c, 0x12, 0xab, 0x4c, 0x73,
- 0xd5, 0x6c, 0xde, 0x14, 0xe5, 0x58, 0x61, 0xf4, 0x9c, 0x19, 0xfd, 0x60, 0x29, 0x49, 0xae, 0xc3,
- 0x58, 0x25, 0x20, 0x1a, 0x7f, 0xf1, 0x7a, 0x9c, 0x2d, 0x69, 0x78, 0xfe, 0xd9, 0x03, 0x47, 0x7c,
- 0xb2, 0xbf, 0x5a, 0x80, 0xa3, 0xdc, 0xc0, 0x6c, 0x6e, 0xdb, 0x77, 0xeb, 0x15, 0xbf, 0x2e, 0x5c,
- 0x8a, 0xdf, 0x86, 0xd1, 0x96, 0x26, 0x3e, 0xef, 0x14, 0x5e, 0x5f, 0x17, 0xb3, 0xc7, 0x02, 0x3f,
- 0xbd, 0x14, 0x1b, 0xb4, 0x50, 0x1d, 0x46, 0xc9, 0xb6, 0x5b, 0x53, 0x96, 0x45, 0x85, 0x03, 0x5f,
- 0xd2, 0xaa, 0x95, 0x25, 0x8d, 0x0e, 0x36, 0xa8, 0xf6, 0x6c, 0x16, 0xae, 0xb1, 0x68, 0x7d, 0x5d,
- 0xac, 0x89, 0x7e, 0xce, 0x82, 0x13, 0x39, 0xc1, 0xf8, 0x69, 0x73, 0x77, 0x98, 0x29, 0x9f, 0x58,
- 0xb6, 0xaa, 0x39, 0x6e, 0xe0, 0x87, 0x05, 0x14, 0x7d, 0x1c, 0xa0, 0x15, 0xa7, 0x30, 0xed, 0x12,
- 0xb5, 0xdc, 0x88, 0x5f, 0xac, 0x85, 0xa2, 0x55, 0x99, 0x4e, 0x35, 0x5a, 0xf6, 0x57, 0xfa, 0xa0,
- 0x9f, 0x19, 0x71, 0xa1, 0x0a, 0x0c, 0x6e, 0xf2, 0x48, 0x89, 0x1d, 0xe7, 0x8d, 0xe2, 0xca, 0xd0,
- 0x8b, 0xf1, 0xbc, 0x69, 0xa5, 0x58, 0x92, 0x41, 0x2b, 0x70, 0x84, 0xa7, 0x67, 0x6d, 0x2c, 0x92,
- 0x86, 0xb3, 0x23, 0x25, 0xd3, 0x05, 0xf6, 0xa9, 0x4a, 0x42, 0x5f, 0x4e, 0xa3, 0xe0, 0xac, 0x7a,
- 0xe8, 0x75, 0x18, 0x8f, 0xdc, 0x26, 0xf1, 0xdb, 0x91, 0xa4, 0xc4, 0xf3, 0xa1, 0xaa, 0xc7, 0xd3,
- 0x9a, 0x01, 0xc5, 0x09, 0x6c, 0xf4, 0x2a, 0x8c, 0xb5, 0x52, 0x32, 0xf8, 0xfe, 0x58, 0x58, 0x65,
- 0xca, 0xdd, 0x4d, 0x5c, 0xe6, 0x9f, 0xd2, 0x66, 0xde, 0x38, 0x6b, 0x9b, 0x01, 0x09, 0x37, 0xfd,
- 0x46, 0x9d, 0x71, 0xc0, 0xfd, 0x9a, 0x7f, 0x4a, 0x02, 0x8e, 0x53, 0x35, 0x28, 0x95, 0x75, 0xc7,
- 0x6d, 0xb4, 0x03, 0x12, 0x53, 0x19, 0x30, 0xa9, 0x2c, 0x27, 0xe0, 0x38, 0x55, 0xa3, 0xbb, 0x72,
- 0x61, 0xf0, 0xc1, 0x28, 0x17, 0xec, 0x5f, 0x2d, 0x80, 0x31, 0xb5, 0xdf, 0xc3, 0xd9, 0x56, 0x5f,
- 0x83, 0xbe, 0x8d, 0xa0, 0x55, 0x13, 0x06, 0x8b, 0x99, 0x5f, 0x76, 0x19, 0x57, 0x16, 0xf4, 0x2f,
- 0xa3, 0xff, 0x31, 0xab, 0x45, 0xf7, 0xf8, 0xb1, 0x4a, 0xe0, 0xd3, 0x4b, 0x4e, 0x06, 0x53, 0x55,
- 0x6e, 0x60, 0x83, 0xf2, 0xbd, 0xde, 0x21, 0xec, 0xb8, 0xf0, 0x65, 0xe1, 0x14, 0x0c, 0xdb, 0xbe,
- 0xaa, 0x78, 0xad, 0x4b, 0x2a, 0xe8, 0x22, 0x8c, 0x88, 0x04, 0x98, 0xcc, 0x5b, 0x89, 0x6f, 0x26,
- 0x66, 0x8b, 0xb8, 0x18, 0x17, 0x63, 0x1d, 0xc7, 0xfe, 0xf1, 0x02, 0x1c, 0xc9, 0x70, 0x37, 0xe5,
- 0xd7, 0xc8, 0x86, 0x1b, 0x46, 0xc1, 0x4e, 0xf2, 0x72, 0xc2, 0xa2, 0x1c, 0x2b, 0x0c, 0x7a, 0x56,
- 0xf1, 0x8b, 0x2a, 0x79, 0x39, 0x09, 0x77, 0x2e, 0x01, 0x3d, 0xd8, 0xe5, 0x44, 0xaf, 0xed, 0x76,
- 0x48, 0x64, 0x86, 0x03, 0x75, 0x6d, 0x33, 0xc3, 0x05, 0x06, 0xa1, 0x4f, 0xc0, 0x0d, 0xa5, 0x8d,
- 0xd7, 0x9e, 0x80, 0x5c, 0x1f, 0xcf, 0x61, 0xb4, 0x73, 0x11, 0xf1, 0x1c, 0x2f, 0x12, 0x0f, 0xc5,
- 0x38, 0xf2, 0x35, 0x2b, 0xc5, 0x02, 0x6a, 0x7f, 0xb9, 0x08, 0x27, 0x73, 0x1d, 0xd0, 0x69, 0xd7,
- 0x9b, 0xbe, 0xe7, 0x46, 0xbe, 0x32, 0xf2, 0xe4, 0xd1, 0xae, 0x49, 0x6b, 0x73, 0x45, 0x94, 0x63,
- 0x85, 0x81, 0xce, 0x41, 0x3f, 0x93, 0xdb, 0x27, 0x93, 0xdf, 0xe1, 0xf9, 0x45, 0x1e, 0x0b, 0x94,
- 0x83, 0xb5, 0x5b, 0xbd, 0xd8, 0xf1, 0x56, 0x7f, 0x8c, 0x72, 0x30, 0x7e, 0x23, 0x79, 0xa1, 0xd0,
- 0xee, 0xfa, 0x7e, 0x03, 0x33, 0x20, 0x7a, 0x42, 0x8c, 0x57, 0xc2, 0xaa, 0x11, 0x3b, 0x75, 0x3f,
- 0xd4, 0x06, 0xed, 0x29, 0x18, 0xdc, 0x22, 0x3b, 0x81, 0xeb, 0x6d, 0x24, 0xad, 0x5d, 0xaf, 0xf2,
- 0x62, 0x2c, 0xe1, 0x66, 0x96, 0xf8, 0xc1, 0x07, 0x91, 0x25, 0x5e, 0x5f, 0x01, 0x43, 0x5d, 0xd9,
- 0x93, 0x9f, 0x28, 0xc2, 0x04, 0x9e, 0x5f, 0xfc, 0x60, 0x22, 0x6e, 0xa4, 0x27, 0xe2, 0x41, 0x24,
- 0x53, 0x3f, 0xd8, 0x6c, 0xfc, 0x9e, 0x05, 0x13, 0x2c, 0x0d, 0xa7, 0x88, 0x1e, 0xe3, 0xfa, 0xde,
- 0x21, 0x3c, 0x05, 0x1e, 0x83, 0xfe, 0x80, 0x36, 0x2a, 0x66, 0x50, 0xed, 0x71, 0xd6, 0x13, 0xcc,
- 0x61, 0xe8, 0x14, 0xf4, 0xb1, 0x2e, 0xd0, 0xc9, 0x1b, 0xe5, 0x47, 0xf0, 0xa2, 0x13, 0x39, 0x98,
- 0x95, 0xb2, 0x38, 0x96, 0x98, 0xb4, 0x1a, 0x2e, 0xef, 0x74, 0x6c, 0x55, 0xf1, 0xfe, 0x08, 0x4d,
- 0x93, 0xd9, 0xb5, 0xf7, 0x16, 0xc7, 0x32, 0x9b, 0x64, 0xe7, 0x67, 0xf6, 0x3f, 0x15, 0xe0, 0x4c,
- 0x66, 0xbd, 0x9e, 0xe3, 0x58, 0x76, 0xae, 0xfd, 0x30, 0x93, 0xf6, 0x15, 0x0f, 0xd1, 0x97, 0xa0,
- 0xaf, 0x57, 0xee, 0xbf, 0xbf, 0x87, 0xf0, 0x92, 0x99, 0x43, 0xf6, 0x3e, 0x09, 0x2f, 0x99, 0xd9,
- 0xb7, 0x1c, 0x31, 0xc1, 0xb7, 0x0b, 0x39, 0xdf, 0xc2, 0x04, 0x06, 0xe7, 0xe9, 0x39, 0xc3, 0x80,
- 0xa1, 0x7c, 0x84, 0xf3, 0x33, 0x86, 0x97, 0x61, 0x05, 0x45, 0x73, 0x30, 0xd1, 0x74, 0x3d, 0x7a,
- 0xf8, 0xec, 0x98, 0xac, 0xb8, 0x52, 0xb7, 0xac, 0x98, 0x60, 0x9c, 0xc4, 0x47, 0xae, 0x16, 0x7a,
- 0x92, 0x7f, 0xdd, 0xab, 0x07, 0xda, 0x75, 0xb3, 0xa6, 0xc5, 0x89, 0x1a, 0xc5, 0x8c, 0x30, 0x94,
- 0x2b, 0x9a, 0x9c, 0xa8, 0xd8, 0xbb, 0x9c, 0x68, 0x34, 0x5b, 0x46, 0x34, 0xf3, 0x2a, 0x8c, 0xdd,
- 0xb7, 0x9e, 0xc5, 0xfe, 0x66, 0x11, 0x1e, 0xe9, 0xb0, 0xed, 0xf9, 0x59, 0x6f, 0xcc, 0x81, 0x76,
- 0xd6, 0xa7, 0xe6, 0xa1, 0x02, 0x47, 0xd7, 0xdb, 0x8d, 0xc6, 0x0e, 0x73, 0xc0, 0x23, 0x75, 0x89,
- 0x21, 0x78, 0x4a, 0x29, 0x1c, 0x39, 0xba, 0x9c, 0x81, 0x83, 0x33, 0x6b, 0xd2, 0x27, 0x16, 0xbd,
- 0x49, 0x76, 0x14, 0xa9, 0xc4, 0x13, 0x0b, 0xeb, 0x40, 0x6c, 0xe2, 0xa2, 0xcb, 0x30, 0xe5, 0x6c,
- 0x3b, 0x2e, 0x4f, 0x7a, 0x22, 0x09, 0xf0, 0x37, 0x96, 0x92, 0x45, 0xcf, 0x25, 0x11, 0x70, 0xba,
- 0x4e, 0x8e, 0x4a, 0xa8, 0x78, 0x5f, 0x2a, 0x21, 0x33, 0x08, 0xe2, 0x40, 0x7e, 0x10, 0xc4, 0xce,
- 0xe7, 0x62, 0xd7, 0x7c, 0x91, 0xef, 0xc0, 0xd8, 0x41, 0x2d, 0xc7, 0x9f, 0x82, 0xc1, 0x40, 0x64,
- 0xe2, 0x4f, 0x78, 0xbb, 0xcb, 0x3c, 0xe5, 0x12, 0x6e, 0xff, 0x6f, 0x16, 0x28, 0x59, 0xb2, 0x19,
- 0xef, 0xfc, 0x55, 0x66, 0x06, 0xcf, 0xa5, 0xe0, 0x5a, 0x88, 0xb3, 0x63, 0x9a, 0x19, 0x7c, 0x0c,
- 0xc4, 0x26, 0x2e, 0x5f, 0x6e, 0x61, 0x1c, 0x59, 0xc3, 0x78, 0x40, 0x08, 0x0d, 0xa4, 0xc2, 0x40,
- 0x9f, 0x80, 0xc1, 0xba, 0xbb, 0xed, 0x86, 0x42, 0x8e, 0x76, 0x60, 0x1d, 0x60, 0xfc, 0x7d, 0x8b,
- 0x9c, 0x0c, 0x96, 0xf4, 0xec, 0x9f, 0xb2, 0x40, 0xa9, 0x4e, 0xaf, 0x10, 0xa7, 0x11, 0x6d, 0xa2,
- 0x37, 0x00, 0x24, 0x05, 0x25, 0x7b, 0x93, 0x06, 0x5d, 0x80, 0x15, 0x64, 0xdf, 0xf8, 0x87, 0xb5,
- 0x3a, 0xe8, 0x75, 0x18, 0xd8, 0x64, 0xb4, 0xc4, 0xb7, 0x9d, 0x53, 0xaa, 0x2e, 0x56, 0xba, 0xbf,
- 0x5b, 0x3a, 0x6a, 0xb6, 0x29, 0x6f, 0x31, 0x5e, 0xcb, 0xfe, 0x89, 0x42, 0x3c, 0xa7, 0x6f, 0xb5,
- 0xfd, 0xc8, 0x39, 0x04, 0x4e, 0xe4, 0xb2, 0xc1, 0x89, 0x3c, 0xd1, 0x49, 0x37, 0xcc, 0xba, 0x94,
- 0xcb, 0x81, 0x5c, 0x4f, 0x70, 0x20, 0x4f, 0x76, 0x27, 0xd5, 0x99, 0xf3, 0xf8, 0x6f, 0x2c, 0x98,
- 0x32, 0xf0, 0x0f, 0xe1, 0x02, 0x5c, 0x36, 0x2f, 0xc0, 0x47, 0xbb, 0x7e, 0x43, 0xce, 0xc5, 0xf7,
- 0xa3, 0xc5, 0x44, 0xdf, 0xd9, 0x85, 0xf7, 0x2e, 0xf4, 0x6d, 0x3a, 0x41, 0x5d, 0xbc, 0xeb, 0x2f,
- 0xf4, 0x34, 0xd6, 0xb3, 0x57, 0x9c, 0x40, 0x18, 0x83, 0x3c, 0x2b, 0x47, 0x9d, 0x16, 0x75, 0x35,
- 0x04, 0x61, 0x4d, 0xa1, 0x4b, 0x30, 0x10, 0xd6, 0xfc, 0x96, 0xf2, 0x29, 0x64, 0x49, 0xd4, 0xab,
- 0xac, 0x64, 0x7f, 0xb7, 0x84, 0xcc, 0xe6, 0x68, 0x31, 0x16, 0xf8, 0xe8, 0x6d, 0x18, 0x63, 0xbf,
- 0x94, 0x65, 0x66, 0x31, 0x5f, 0x02, 0x53, 0xd5, 0x11, 0xb9, 0xd9, 0xb2, 0x51, 0x84, 0x4d, 0x52,
- 0x33, 0x1b, 0x30, 0xac, 0x3e, 0xeb, 0xa1, 0x6a, 0xfe, 0xff, 0xba, 0x08, 0x47, 0x32, 0xd6, 0x1c,
- 0x0a, 0x8d, 0x99, 0xb8, 0xd8, 0xe3, 0x52, 0x7d, 0x8f, 0x73, 0x11, 0xb2, 0x07, 0x60, 0x5d, 0xac,
- 0xad, 0x9e, 0x1b, 0xbd, 0x11, 0x92, 0x64, 0xa3, 0xb4, 0xa8, 0x7b, 0xa3, 0xb4, 0xb1, 0x43, 0x1b,
- 0x6a, 0xda, 0x90, 0xea, 0xe9, 0x43, 0x9d, 0xd3, 0x3f, 0xe9, 0x83, 0xa3, 0x59, 0xe6, 0x2a, 0xe8,
- 0x73, 0x30, 0xc0, 0x9c, 0xde, 0xa4, 0xe0, 0xec, 0xc5, 0x5e, 0x0d, 0x5d, 0x66, 0x99, 0xdf, 0x9c,
- 0x08, 0x99, 0x3b, 0x2b, 0x8f, 0x23, 0x5e, 0xd8, 0x75, 0x98, 0x45, 0x9b, 0x2c, 0x94, 0x95, 0xb8,
- 0x3d, 0xe5, 0xf1, 0xf1, 0x91, 0x9e, 0x3b, 0x20, 0xee, 0xdf, 0x30, 0x61, 0xf5, 0x25, 0x8b, 0xbb,
- 0x5b, 0x7d, 0xc9, 0x96, 0x51, 0x19, 0x06, 0x6a, 0xdc, 0x9c, 0xa8, 0xd8, 0xfd, 0x08, 0xe3, 0xb6,
- 0x44, 0xea, 0x00, 0x16, 0x36, 0x44, 0x82, 0xc0, 0x8c, 0x0b, 0x23, 0xda, 0xc0, 0x3c, 0xd4, 0xc5,
- 0xb3, 0x45, 0x2f, 0x3e, 0x6d, 0x08, 0x1e, 0xea, 0x02, 0xfa, 0x59, 0xed, 0xee, 0x17, 0xe7, 0xc1,
- 0x87, 0x0d, 0xde, 0xe9, 0x54, 0xc2, 0x15, 0x31, 0xb1, 0xaf, 0x18, 0x2f, 0x55, 0x35, 0x63, 0xcd,
- 0xe7, 0x26, 0xcc, 0x32, 0x2f, 0xfc, 0xce, 0xf1, 0xe5, 0xed, 0x9f, 0xb3, 0x20, 0xe1, 0x2c, 0xa6,
- 0xc4, 0x9d, 0x56, 0xae, 0xb8, 0xf3, 0x2c, 0xf4, 0x05, 0x7e, 0x43, 0xf2, 0x53, 0x0a, 0x03, 0xfb,
- 0x0d, 0x82, 0x19, 0x84, 0x62, 0x44, 0xb1, 0x10, 0x6b, 0x54, 0x7f, 0xa0, 0x8b, 0xa7, 0xf7, 0x63,
- 0xd0, 0xdf, 0x20, 0xdb, 0xa4, 0x91, 0xcc, 0x1b, 0x7b, 0x8d, 0x16, 0x62, 0x0e, 0xb3, 0x7f, 0xaf,
- 0x0f, 0x4e, 0x77, 0x8c, 0x78, 0x47, 0x19, 0xcc, 0x0d, 0x27, 0x22, 0x77, 0x9c, 0x9d, 0x64, 0xbe,
- 0xc4, 0xcb, 0xbc, 0x18, 0x4b, 0x38, 0x73, 0xdc, 0xe6, 0x39, 0x80, 0x12, 0xc2, 0x61, 0x91, 0xfa,
- 0x47, 0x40, 0x4d, 0x61, 0x63, 0xf1, 0x41, 0x08, 0x1b, 0x9f, 0x07, 0x08, 0xc3, 0x06, 0xb7, 0x09,
- 0xad, 0x0b, 0x8f, 0xf0, 0x38, 0x57, 0x54, 0xf5, 0x9a, 0x80, 0x60, 0x0d, 0x0b, 0x2d, 0xc2, 0x64,
- 0x2b, 0xf0, 0x23, 0x2e, 0x6b, 0x5f, 0xe4, 0x66, 0xd3, 0xfd, 0x66, 0xb0, 0xb1, 0x4a, 0x02, 0x8e,
- 0x53, 0x35, 0xd0, 0x4b, 0x30, 0x22, 0x02, 0x90, 0x55, 0x7c, 0xbf, 0x21, 0xc4, 0x7b, 0xca, 0x92,
- 0xb8, 0x1a, 0x83, 0xb0, 0x8e, 0xa7, 0x55, 0x63, 0x02, 0xfc, 0xc1, 0xcc, 0x6a, 0x5c, 0x88, 0xaf,
- 0xe1, 0x25, 0x92, 0x15, 0x0c, 0xf5, 0x94, 0xac, 0x20, 0x16, 0x78, 0x0e, 0xf7, 0xac, 0x4f, 0x86,
- 0xae, 0x22, 0xc2, 0xaf, 0xf5, 0xc1, 0x11, 0xb1, 0x70, 0x1e, 0xf6, 0x72, 0xb9, 0x91, 0x5e, 0x2e,
- 0x0f, 0x42, 0x24, 0xfa, 0xc1, 0x9a, 0x39, 0xec, 0x35, 0xf3, 0x93, 0x16, 0x98, 0x3c, 0x24, 0xfa,
- 0x8f, 0x72, 0x13, 0xce, 0xbe, 0x94, 0xcb, 0x93, 0xc6, 0x91, 0xcc, 0xdf, 0x5b, 0xea, 0x59, 0xfb,
- 0x7f, 0xb1, 0xe0, 0xd1, 0xae, 0x14, 0xd1, 0x12, 0x0c, 0x33, 0x46, 0x57, 0x7b, 0x17, 0x3f, 0xa9,
- 0xdc, 0x2a, 0x24, 0x20, 0x87, 0xef, 0x8e, 0x6b, 0xa2, 0xa5, 0x54, 0x66, 0xdf, 0xa7, 0x32, 0x32,
- 0xfb, 0x1e, 0x33, 0x86, 0xe7, 0x3e, 0x53, 0xfb, 0x7e, 0x89, 0xde, 0x38, 0xa6, 0x6f, 0xe6, 0x47,
- 0x0c, 0x71, 0xae, 0x9d, 0x10, 0xe7, 0x22, 0x13, 0x5b, 0xbb, 0x43, 0xde, 0x80, 0x49, 0x16, 0x99,
- 0x94, 0x39, 0xf9, 0x08, 0xa7, 0xce, 0x42, 0x6c, 0xc8, 0x7f, 0x2d, 0x01, 0xc3, 0x29, 0x6c, 0xfb,
- 0x1f, 0x8a, 0x30, 0xc0, 0xb7, 0xdf, 0x21, 0x3c, 0x7c, 0x9f, 0x81, 0x61, 0xb7, 0xd9, 0x6c, 0xf3,
- 0x64, 0xad, 0xfd, 0xb1, 0x59, 0x78, 0x59, 0x16, 0xe2, 0x18, 0x8e, 0x96, 0x85, 0x26, 0xa1, 0x43,
- 0xf0, 0x73, 0xde, 0xf1, 0xd9, 0x45, 0x27, 0x72, 0x38, 0x17, 0xa7, 0xee, 0xd9, 0x58, 0xe7, 0x80,
- 0x3e, 0x0d, 0x10, 0x46, 0x81, 0xeb, 0x6d, 0xd0, 0x32, 0x91, 0x21, 0xe3, 0xe9, 0x0e, 0xd4, 0xaa,
- 0x0a, 0x99, 0xd3, 0x8c, 0xcf, 0x1c, 0x05, 0xc0, 0x1a, 0x45, 0x34, 0x6b, 0xdc, 0xf4, 0x33, 0x89,
- 0xb9, 0x03, 0x4e, 0x35, 0x9e, 0xb3, 0x99, 0x97, 0x61, 0x58, 0x11, 0xef, 0x26, 0x57, 0x1c, 0xd5,
- 0x19, 0xb6, 0x8f, 0xc1, 0x44, 0xa2, 0x6f, 0x07, 0x12, 0x4b, 0xfe, 0xbe, 0x05, 0x13, 0xbc, 0x33,
- 0x4b, 0xde, 0xb6, 0xb8, 0x0d, 0xee, 0xc1, 0xd1, 0x46, 0xc6, 0xa9, 0x2c, 0xa6, 0xbf, 0xf7, 0x53,
- 0x5c, 0x89, 0x21, 0xb3, 0xa0, 0x38, 0xb3, 0x0d, 0x74, 0x9e, 0xee, 0x38, 0x7a, 0xea, 0x3a, 0x0d,
- 0x11, 0x99, 0x64, 0x94, 0xef, 0x36, 0x5e, 0x86, 0x15, 0xd4, 0xfe, 0x5b, 0x0b, 0xa6, 0x78, 0xcf,
- 0xaf, 0x92, 0x1d, 0x75, 0x36, 0x7d, 0x27, 0xfb, 0x2e, 0xd2, 0x84, 0x17, 0x72, 0xd2, 0x84, 0xeb,
- 0x9f, 0x56, 0xec, 0xf8, 0x69, 0x5f, 0xb5, 0x40, 0xac, 0x90, 0x43, 0x90, 0xb4, 0x7c, 0x9f, 0x29,
- 0x69, 0x99, 0xc9, 0xdf, 0x04, 0x39, 0x22, 0x96, 0x7f, 0xb3, 0x60, 0x92, 0x23, 0xc4, 0x56, 0x10,
- 0xdf, 0xd1, 0x79, 0x98, 0x37, 0xbf, 0x28, 0xd3, 0xac, 0xf5, 0x2a, 0xd9, 0x59, 0xf3, 0x2b, 0x4e,
- 0xb4, 0x99, 0xfd, 0x51, 0xc6, 0x64, 0xf5, 0x75, 0x9c, 0xac, 0xba, 0xdc, 0x40, 0x46, 0x42, 0xc8,
- 0x2e, 0x02, 0xe0, 0x83, 0x26, 0x84, 0xb4, 0xff, 0xd1, 0x02, 0xc4, 0x9b, 0x31, 0x18, 0x37, 0xca,
- 0x0e, 0xb1, 0x52, 0xed, 0xa2, 0x8b, 0x8f, 0x26, 0x05, 0xc1, 0x1a, 0xd6, 0x03, 0x19, 0x9e, 0x84,
- 0x29, 0x4b, 0xb1, 0xbb, 0x29, 0xcb, 0x01, 0x46, 0xf4, 0xab, 0x83, 0x90, 0x74, 0xeb, 0x44, 0x37,
- 0x61, 0xb4, 0xe6, 0xb4, 0x9c, 0xdb, 0x6e, 0xc3, 0x8d, 0x5c, 0x12, 0x76, 0xb2, 0x73, 0x5b, 0xd0,
- 0xf0, 0x84, 0xf1, 0x81, 0x56, 0x82, 0x0d, 0x3a, 0x68, 0x16, 0xa0, 0x15, 0xb8, 0xdb, 0x6e, 0x83,
- 0x6c, 0x30, 0x81, 0x10, 0x8b, 0x85, 0xc4, 0x8d, 0xee, 0x64, 0x29, 0xd6, 0x30, 0x32, 0x42, 0x90,
- 0x14, 0x1f, 0x72, 0x08, 0x12, 0x38, 0xb4, 0x10, 0x24, 0x7d, 0x07, 0x0a, 0x41, 0x32, 0x74, 0xe0,
- 0x10, 0x24, 0xfd, 0x3d, 0x85, 0x20, 0xc1, 0x70, 0x5c, 0xf2, 0x9e, 0xf4, 0xff, 0xb2, 0xdb, 0x20,
- 0xe2, 0xc1, 0xc1, 0x03, 0x38, 0xcd, 0xec, 0xed, 0x96, 0x8e, 0xe3, 0x4c, 0x0c, 0x9c, 0x53, 0x13,
- 0x7d, 0x1c, 0xa6, 0x9d, 0x46, 0xc3, 0xbf, 0xa3, 0x26, 0x75, 0x29, 0xac, 0x39, 0x8d, 0x38, 0xae,
- 0xdf, 0xd0, 0xfc, 0xa9, 0xbd, 0xdd, 0xd2, 0xf4, 0x5c, 0x0e, 0x0e, 0xce, 0xad, 0x8d, 0x5e, 0x83,
- 0xe1, 0x56, 0xe0, 0xd7, 0x56, 0x34, 0xdf, 0xf3, 0x33, 0x74, 0x00, 0x2b, 0xb2, 0x70, 0x7f, 0xb7,
- 0x34, 0xa6, 0xfe, 0xb0, 0x0b, 0x3f, 0xae, 0x90, 0x11, 0xdd, 0x63, 0xe4, 0x61, 0x47, 0xf7, 0x18,
- 0x7d, 0xc0, 0xd1, 0x3d, 0xec, 0x2d, 0x38, 0x52, 0x25, 0x81, 0xeb, 0x34, 0xdc, 0x7b, 0x94, 0x27,
- 0x97, 0x67, 0xe0, 0x1a, 0x0c, 0x07, 0x89, 0x53, 0xbf, 0xa7, 0xa0, 0xe7, 0x9a, 0x5c, 0x46, 0x9e,
- 0xf2, 0x31, 0x21, 0xfb, 0xff, 0xb7, 0x60, 0x50, 0xb8, 0x8a, 0x1e, 0x02, 0x67, 0x3a, 0x67, 0xa8,
- 0x64, 0x4a, 0xd9, 0x93, 0xc2, 0x3a, 0x93, 0xab, 0x8c, 0x29, 0x27, 0x94, 0x31, 0x8f, 0x76, 0x22,
- 0xd2, 0x59, 0x0d, 0xf3, 0x9f, 0x15, 0xe9, 0x0b, 0xc1, 0x08, 0x5a, 0xf0, 0xf0, 0x87, 0x60, 0x15,
- 0x06, 0x43, 0xe1, 0x34, 0x5f, 0xc8, 0xf7, 0xe5, 0x49, 0x4e, 0x62, 0x6c, 0x03, 0x29, 0xdc, 0xe4,
- 0x25, 0x91, 0x4c, 0x6f, 0xfc, 0xe2, 0x43, 0xf4, 0xc6, 0xef, 0x16, 0xd6, 0xa1, 0xef, 0x41, 0x84,
- 0x75, 0xb0, 0xbf, 0xce, 0x6e, 0x67, 0xbd, 0xfc, 0x10, 0x18, 0xb7, 0xcb, 0xe6, 0x3d, 0x6e, 0x77,
- 0x58, 0x59, 0xa2, 0x53, 0x39, 0x0c, 0xdc, 0xef, 0x5a, 0x70, 0x3a, 0xe3, 0xab, 0x34, 0x6e, 0xee,
- 0x59, 0x18, 0x72, 0xda, 0x75, 0x57, 0xed, 0x65, 0x4d, 0x5b, 0x3c, 0x27, 0xca, 0xb1, 0xc2, 0x40,
- 0x0b, 0x30, 0x45, 0xee, 0xb6, 0x5c, 0xae, 0x86, 0xd7, 0x4d, 0xc7, 0x8b, 0xdc, 0xbf, 0x78, 0x29,
- 0x09, 0xc4, 0x69, 0x7c, 0x15, 0x1a, 0xae, 0x98, 0x1b, 0x1a, 0xee, 0x37, 0x2d, 0x18, 0x51, 0x6e,
- 0xe3, 0x0f, 0x7d, 0xb4, 0xdf, 0x30, 0x47, 0xfb, 0x91, 0x0e, 0xa3, 0x9d, 0x33, 0xcc, 0x7f, 0x53,
- 0x50, 0xfd, 0xad, 0xf8, 0x41, 0xd4, 0x03, 0x97, 0x78, 0xff, 0x6e, 0x2f, 0x17, 0x61, 0xc4, 0x69,
- 0xb5, 0x24, 0x40, 0xda, 0x2f, 0xb2, 0x14, 0x16, 0x71, 0x31, 0xd6, 0x71, 0x94, 0x17, 0x4e, 0x31,
- 0xd7, 0x0b, 0xa7, 0x0e, 0x10, 0x39, 0xc1, 0x06, 0x89, 0x68, 0x99, 0x30, 0xb7, 0xce, 0x3f, 0x6f,
- 0xda, 0x91, 0xdb, 0x98, 0x75, 0xbd, 0x28, 0x8c, 0x82, 0xd9, 0xb2, 0x17, 0x5d, 0x0f, 0xf8, 0x33,
- 0x55, 0x0b, 0xc0, 0xa8, 0x68, 0x61, 0x8d, 0xae, 0x0c, 0x91, 0xc2, 0xda, 0xe8, 0x37, 0x0d, 0x61,
- 0x56, 0x45, 0x39, 0x56, 0x18, 0xf6, 0xcb, 0xec, 0xf6, 0x61, 0x63, 0x7a, 0xb0, 0xc0, 0x82, 0xff,
- 0x34, 0xaa, 0x66, 0x83, 0xa9, 0x84, 0x17, 0xf5, 0xf0, 0x85, 0x9d, 0x0f, 0x7b, 0xda, 0xb0, 0xee,
- 0xcf, 0x1a, 0xc7, 0x38, 0x44, 0x9f, 0x4c, 0x19, 0x37, 0x3d, 0xd7, 0xe5, 0xd6, 0x38, 0x80, 0x39,
- 0x13, 0xcb, 0x67, 0xc7, 0xb2, 0x7d, 0x95, 0x2b, 0x62, 0x5f, 0x68, 0xf9, 0xec, 0x04, 0x00, 0xc7,
- 0x38, 0x94, 0x61, 0x53, 0x7f, 0xc2, 0x69, 0x14, 0x87, 0x3d, 0x57, 0xd8, 0x21, 0xd6, 0x30, 0xd0,
- 0x05, 0x21, 0xb4, 0xe0, 0xba, 0x87, 0x47, 0x12, 0x42, 0x0b, 0x39, 0x5c, 0x9a, 0xa4, 0xe9, 0x22,
- 0x8c, 0x90, 0xbb, 0x11, 0x09, 0x3c, 0xa7, 0x41, 0x5b, 0xe8, 0x8f, 0xa3, 0xeb, 0x2e, 0xc5, 0xc5,
- 0x58, 0xc7, 0x41, 0x6b, 0x30, 0x11, 0x72, 0x59, 0x9e, 0x4a, 0xb6, 0xc1, 0x65, 0xa2, 0x4f, 0x2b,
- 0x87, 0x7d, 0x13, 0xbc, 0xcf, 0x8a, 0xf8, 0xe9, 0x24, 0xc3, 0x98, 0x24, 0x49, 0xa0, 0xd7, 0x61,
- 0xbc, 0xe1, 0x3b, 0xf5, 0x79, 0xa7, 0xe1, 0x78, 0x35, 0x36, 0x3e, 0x43, 0x46, 0x2c, 0xcb, 0xf1,
- 0x6b, 0x06, 0x14, 0x27, 0xb0, 0x29, 0x83, 0xa8, 0x97, 0x88, 0x04, 0x31, 0x8e, 0xb7, 0x41, 0xc2,
- 0xe9, 0x61, 0xf6, 0x55, 0x8c, 0x41, 0xbc, 0x96, 0x83, 0x83, 0x73, 0x6b, 0xa3, 0x4b, 0x30, 0x2a,
- 0x3f, 0x5f, 0x8b, 0xfa, 0x13, 0x3b, 0x34, 0x69, 0x30, 0x6c, 0x60, 0xa2, 0x10, 0x8e, 0xc9, 0xff,
- 0x6b, 0x81, 0xb3, 0xbe, 0xee, 0xd6, 0x44, 0x28, 0x0c, 0xee, 0xfc, 0xfd, 0x31, 0xe9, 0x69, 0xba,
- 0x94, 0x85, 0xb4, 0xbf, 0x5b, 0x3a, 0x25, 0x46, 0x2d, 0x13, 0x8e, 0xb3, 0x69, 0xa3, 0x15, 0x38,
- 0xc2, 0x6d, 0x60, 0x16, 0x36, 0x49, 0x6d, 0x4b, 0x6e, 0x38, 0xc6, 0x35, 0x6a, 0x8e, 0x3f, 0x57,
- 0xd2, 0x28, 0x38, 0xab, 0x1e, 0x7a, 0x07, 0xa6, 0x5b, 0xed, 0xdb, 0x0d, 0x37, 0xdc, 0x5c, 0xf5,
- 0x23, 0x66, 0x42, 0x36, 0x57, 0xaf, 0x07, 0x24, 0xe4, 0xbe, 0xc1, 0xec, 0xea, 0x95, 0x91, 0x9a,
- 0x2a, 0x39, 0x78, 0x38, 0x97, 0x02, 0xba, 0x07, 0xc7, 0x12, 0x0b, 0x41, 0x84, 0x5c, 0x19, 0xcf,
- 0x4f, 0xb5, 0x55, 0xcd, 0xaa, 0x20, 0xa2, 0x17, 0x65, 0x81, 0x70, 0x76, 0x13, 0xe8, 0x15, 0x00,
- 0xb7, 0xb5, 0xec, 0x34, 0xdd, 0x06, 0x7d, 0x8e, 0x1e, 0x61, 0x6b, 0x84, 0x3e, 0x4d, 0xa0, 0x5c,
- 0x91, 0xa5, 0xf4, 0x6c, 0x16, 0xff, 0x76, 0xb0, 0x86, 0x8d, 0xae, 0xc1, 0xb8, 0xf8, 0xb7, 0x23,
- 0xa6, 0x74, 0x4a, 0x65, 0x65, 0x1d, 0x97, 0x35, 0xd4, 0x3c, 0x26, 0x4a, 0x70, 0xa2, 0x2e, 0xda,
- 0x80, 0xd3, 0x32, 0x25, 0xac, 0xbe, 0x3e, 0xe5, 0x1c, 0x84, 0x2c, 0xbf, 0xd5, 0x10, 0xf7, 0x29,
- 0x9a, 0xeb, 0x84, 0x88, 0x3b, 0xd3, 0xa1, 0xf7, 0xba, 0xbe, 0xcc, 0xb9, 0xc7, 0xf8, 0xb1, 0x38,
- 0x22, 0xe8, 0xb5, 0x24, 0x10, 0xa7, 0xf1, 0x91, 0x0f, 0xc7, 0x5c, 0x2f, 0x6b, 0x55, 0x1f, 0x67,
- 0x84, 0x3e, 0xca, 0x9d, 0xe5, 0x3b, 0xaf, 0xe8, 0x4c, 0x38, 0xce, 0xa6, 0x8b, 0xca, 0x70, 0x24,
- 0xe2, 0x05, 0x8b, 0x6e, 0xc8, 0xd3, 0xe7, 0xd0, 0x67, 0xdf, 0x09, 0xd6, 0xdc, 0x09, 0xba, 0x9a,
- 0xd7, 0xd2, 0x60, 0x9c, 0x55, 0xe7, 0xbd, 0x19, 0x80, 0x7e, 0xc3, 0xa2, 0xb5, 0x35, 0x46, 0x1f,
- 0x7d, 0x06, 0x46, 0xf5, 0xf1, 0x11, 0x4c, 0xcb, 0xb9, 0x6c, 0x3e, 0x58, 0x3b, 0x5e, 0xf8, 0x33,
- 0x41, 0x1d, 0x21, 0x3a, 0x0c, 0x1b, 0x14, 0x51, 0x2d, 0x23, 0xc8, 0xc5, 0x85, 0xde, 0x98, 0xa2,
- 0xde, 0xed, 0x1f, 0x09, 0x64, 0xef, 0x1c, 0x74, 0x0d, 0x86, 0x6a, 0x0d, 0x97, 0x78, 0x51, 0xb9,
- 0xd2, 0x29, 0x50, 0xeb, 0x82, 0xc0, 0x11, 0x5b, 0x51, 0x64, 0xbd, 0xe2, 0x65, 0x58, 0x51, 0xb0,
- 0x2f, 0xc1, 0x48, 0xb5, 0x41, 0x48, 0x8b, 0xfb, 0x71, 0xa1, 0xa7, 0xd8, 0xc3, 0x84, 0xb1, 0x96,
- 0x16, 0x63, 0x2d, 0xf5, 0x37, 0x07, 0x63, 0x2a, 0x25, 0xdc, 0xfe, 0xb3, 0x02, 0x94, 0xba, 0x24,
- 0x5f, 0x4b, 0xe8, 0xdb, 0xac, 0x9e, 0xf4, 0x6d, 0x73, 0x30, 0x11, 0xff, 0xd3, 0x45, 0x79, 0xca,
- 0x18, 0xfa, 0xa6, 0x09, 0xc6, 0x49, 0xfc, 0x9e, 0xfd, 0x5a, 0x74, 0x95, 0x5d, 0x5f, 0x57, 0xcf,
- 0x2c, 0x43, 0x55, 0xdf, 0xdf, 0xfb, 0xdb, 0x3b, 0x57, 0xed, 0x6a, 0x7f, 0xbd, 0x00, 0xc7, 0xd4,
- 0x10, 0x7e, 0xef, 0x0e, 0xdc, 0x8d, 0xf4, 0xc0, 0x3d, 0x00, 0xa5, 0xb5, 0x7d, 0x1d, 0x06, 0x78,
- 0xf4, 0xd8, 0x1e, 0x78, 0xfe, 0xc7, 0xcc, 0x40, 0xfe, 0x8a, 0xcd, 0x34, 0x82, 0xf9, 0xff, 0x98,
- 0x05, 0x13, 0x09, 0x07, 0x49, 0x84, 0x35, 0x2f, 0xfa, 0xfb, 0xe1, 0xcb, 0xb3, 0x38, 0xfe, 0xb3,
- 0xd0, 0xb7, 0xe9, 0x2b, 0x23, 0x65, 0x85, 0x71, 0xc5, 0x0f, 0x23, 0xcc, 0x20, 0xf6, 0xdf, 0x59,
- 0xd0, 0xbf, 0xe6, 0xb8, 0x5e, 0x24, 0xb5, 0x1f, 0x56, 0x8e, 0xf6, 0xa3, 0x97, 0xef, 0x42, 0x2f,
- 0xc1, 0x00, 0x59, 0x5f, 0x27, 0xb5, 0x48, 0xcc, 0xaa, 0x8c, 0xa6, 0x31, 0xb0, 0xc4, 0x4a, 0x29,
- 0x13, 0xca, 0x1a, 0xe3, 0x7f, 0xb1, 0x40, 0x46, 0xb7, 0x60, 0x38, 0x72, 0x9b, 0x64, 0xae, 0x5e,
- 0x17, 0x36, 0x01, 0xf7, 0x11, 0x02, 0x66, 0x4d, 0x12, 0xc0, 0x31, 0x2d, 0xfb, 0xcb, 0x05, 0x80,
- 0x38, 0x5a, 0x5d, 0xb7, 0x4f, 0x9c, 0x4f, 0x69, 0x8b, 0xcf, 0x65, 0x68, 0x8b, 0x51, 0x4c, 0x30,
- 0x43, 0x55, 0xac, 0x86, 0xa9, 0xd8, 0xd3, 0x30, 0xf5, 0x1d, 0x64, 0x98, 0x16, 0x60, 0x2a, 0x8e,
- 0xb6, 0x67, 0x06, 0x1b, 0x65, 0xf7, 0xf7, 0x5a, 0x12, 0x88, 0xd3, 0xf8, 0x36, 0x81, 0xb3, 0x2a,
- 0xe8, 0x98, 0xb8, 0x0b, 0x99, 0x2b, 0x81, 0xae, 0x7d, 0xef, 0x32, 0x4e, 0xb1, 0x3a, 0xbc, 0x90,
- 0xab, 0x0e, 0xff, 0x45, 0x0b, 0x8e, 0x26, 0xdb, 0x61, 0x7e, 0xf7, 0x5f, 0xb4, 0xe0, 0x58, 0x9c,
- 0x7b, 0x28, 0x6d, 0x82, 0xf0, 0x62, 0xc7, 0x40, 0x6a, 0x39, 0x3d, 0x8e, 0xc3, 0xb6, 0xac, 0x64,
- 0x91, 0xc6, 0xd9, 0x2d, 0xda, 0xff, 0x5f, 0x1f, 0x4c, 0xe7, 0x45, 0x60, 0x63, 0x9e, 0x46, 0xce,
- 0xdd, 0xea, 0x16, 0xb9, 0x23, 0xfc, 0x39, 0x62, 0x4f, 0x23, 0x5e, 0x8c, 0x25, 0x3c, 0x99, 0x6e,
- 0xaa, 0xd0, 0x63, 0xba, 0xa9, 0x4d, 0x98, 0xba, 0xb3, 0x49, 0xbc, 0x1b, 0x5e, 0xe8, 0x44, 0x6e,
- 0xb8, 0xee, 0x32, 0x05, 0x3a, 0x5f, 0x37, 0xaf, 0x48, 0xaf, 0x8b, 0x5b, 0x49, 0x84, 0xfd, 0xdd,
- 0xd2, 0x69, 0xa3, 0x20, 0xee, 0x32, 0x3f, 0x48, 0x70, 0x9a, 0x68, 0x3a, 0x5b, 0x57, 0xdf, 0x43,
- 0xce, 0xd6, 0xd5, 0x74, 0x85, 0xd9, 0x8d, 0x74, 0x23, 0x61, 0xcf, 0xd6, 0x15, 0x55, 0x8a, 0x35,
- 0x0c, 0xf4, 0x29, 0x40, 0x7a, 0xba, 0x45, 0x23, 0x00, 0xee, 0x73, 0x7b, 0xbb, 0x25, 0xb4, 0x9a,
- 0x82, 0xee, 0xef, 0x96, 0x8e, 0xd0, 0xd2, 0xb2, 0x47, 0x9f, 0xbf, 0x71, 0xd4, 0xc0, 0x0c, 0x42,
- 0xe8, 0x16, 0x4c, 0xd2, 0x52, 0xb6, 0xa3, 0x64, 0x74, 0x5d, 0xfe, 0x64, 0x7d, 0x66, 0x6f, 0xb7,
- 0x34, 0xb9, 0x9a, 0x80, 0xe5, 0x91, 0x4e, 0x11, 0xc9, 0x48, 0xda, 0x35, 0xd4, 0x6b, 0xd2, 0x2e,
- 0xfb, 0x8b, 0x16, 0x9c, 0xa4, 0x17, 0x5c, 0xfd, 0x5a, 0x8e, 0x16, 0xdd, 0x69, 0xb9, 0x5c, 0x4f,
- 0x23, 0xae, 0x1a, 0x26, 0xab, 0xab, 0x94, 0xb9, 0x96, 0x46, 0x41, 0xe9, 0x09, 0xbf, 0xe5, 0x7a,
- 0xf5, 0xe4, 0x09, 0x7f, 0xd5, 0xf5, 0xea, 0x98, 0x41, 0xd4, 0x95, 0x55, 0xcc, 0x8d, 0xd6, 0xff,
- 0x35, 0xba, 0x57, 0x69, 0x5f, 0xbe, 0xa3, 0xdd, 0x40, 0xcf, 0xe8, 0x3a, 0x55, 0x61, 0x3e, 0x99,
- 0xab, 0x4f, 0xfd, 0x82, 0x05, 0xc2, 0xfb, 0xbd, 0x87, 0x3b, 0xf9, 0x6d, 0x18, 0xdd, 0x4e, 0xa7,
- 0xa2, 0x3d, 0x9b, 0x1f, 0x0e, 0x40, 0x24, 0xa0, 0x55, 0x2c, 0xba, 0x91, 0x76, 0xd6, 0xa0, 0x65,
- 0xd7, 0x41, 0x40, 0x17, 0x09, 0xd3, 0x6a, 0x74, 0xef, 0xcd, 0xf3, 0x00, 0x75, 0x86, 0xcb, 0xf2,
- 0xd3, 0x17, 0x4c, 0x8e, 0x6b, 0x51, 0x41, 0xb0, 0x86, 0x65, 0xff, 0x7a, 0x11, 0x46, 0x64, 0xea,
- 0xd3, 0xb6, 0xd7, 0x8b, 0xec, 0x51, 0x67, 0x9c, 0x0a, 0x5d, 0x19, 0xa7, 0x77, 0x60, 0x2a, 0x20,
- 0xb5, 0x76, 0x10, 0xba, 0xdb, 0x44, 0x82, 0xc5, 0x26, 0x99, 0xe5, 0xc9, 0x22, 0x12, 0xc0, 0x7d,
- 0x16, 0x22, 0x2b, 0x51, 0xc8, 0x94, 0xc6, 0x69, 0x42, 0xe8, 0x02, 0x0c, 0x33, 0xd1, 0x7b, 0x25,
- 0x16, 0x08, 0x2b, 0xc1, 0xd7, 0x8a, 0x04, 0xe0, 0x18, 0x87, 0x3d, 0x0e, 0xda, 0xb7, 0x19, 0x7a,
- 0xc2, 0x13, 0xbc, 0xca, 0x8b, 0xb1, 0x84, 0xa3, 0x8f, 0xc3, 0x24, 0xaf, 0x17, 0xf8, 0x2d, 0x67,
- 0x83, 0xab, 0x04, 0xfb, 0x55, 0x78, 0x9d, 0xc9, 0x95, 0x04, 0x6c, 0x7f, 0xb7, 0x74, 0x34, 0x59,
- 0xc6, 0xba, 0x9d, 0xa2, 0xc2, 0x2c, 0xff, 0x78, 0x23, 0xf4, 0xce, 0x48, 0x19, 0x0c, 0xc6, 0x20,
- 0xac, 0xe3, 0xd9, 0xff, 0x6a, 0xc1, 0x94, 0x36, 0x55, 0x3d, 0xe7, 0xeb, 0x30, 0x06, 0xa9, 0xd0,
- 0xc3, 0x20, 0x1d, 0x2c, 0xda, 0x43, 0xe6, 0x0c, 0xf7, 0x3d, 0xa0, 0x19, 0xb6, 0x3f, 0x03, 0x28,
- 0x9d, 0x57, 0x17, 0xbd, 0xc9, 0x0d, 0xf9, 0xdd, 0x80, 0xd4, 0x3b, 0x29, 0xfc, 0xf5, 0xc8, 0x39,
- 0xd2, 0x73, 0x95, 0xd7, 0xc2, 0xaa, 0xbe, 0xfd, 0xe3, 0x7d, 0x30, 0x99, 0x8c, 0xd5, 0x81, 0xae,
- 0xc0, 0x00, 0xe7, 0xd2, 0x05, 0xf9, 0x0e, 0xf6, 0x64, 0x5a, 0x84, 0x0f, 0x9e, 0x4b, 0x87, 0x73,
- 0xf7, 0xa2, 0x3e, 0x7a, 0x07, 0x46, 0xea, 0xfe, 0x1d, 0xef, 0x8e, 0x13, 0xd4, 0xe7, 0x2a, 0x65,
- 0x71, 0x42, 0x64, 0x0a, 0xa0, 0x16, 0x63, 0x34, 0x3d, 0x6a, 0x08, 0xb3, 0x9d, 0x88, 0x41, 0x58,
- 0x27, 0x87, 0xd6, 0x58, 0x7a, 0xa7, 0x75, 0x77, 0x63, 0xc5, 0x69, 0x75, 0xf2, 0xea, 0x5a, 0x90,
- 0x48, 0x1a, 0xe5, 0x31, 0x91, 0x03, 0x8a, 0x03, 0x70, 0x4c, 0x08, 0x7d, 0x0e, 0x8e, 0x84, 0x39,
- 0x2a, 0xb1, 0xbc, 0x34, 0xeb, 0x9d, 0xb4, 0x44, 0x5c, 0x98, 0x92, 0xa5, 0x3c, 0xcb, 0x6a, 0x06,
- 0xdd, 0x05, 0x24, 0x44, 0xcf, 0x6b, 0x41, 0x3b, 0x8c, 0xe6, 0xdb, 0x5e, 0xbd, 0x21, 0xd3, 0x3f,
- 0x7d, 0x38, 0x5b, 0x4e, 0x90, 0xc4, 0xd6, 0xda, 0x66, 0xe1, 0x85, 0xd3, 0x18, 0x38, 0xa3, 0x0d,
- 0xfb, 0x0b, 0x7d, 0x30, 0x23, 0x13, 0x59, 0x67, 0x78, 0xaf, 0x7c, 0xde, 0x4a, 0xb8, 0xaf, 0xbc,
- 0x92, 0x7f, 0xd0, 0x3f, 0x34, 0x27, 0x96, 0x2f, 0xa5, 0x9d, 0x58, 0x5e, 0x3b, 0x60, 0x37, 0x1e,
- 0x98, 0x2b, 0xcb, 0xf7, 0xac, 0xff, 0xc9, 0xde, 0x51, 0x30, 0xae, 0x66, 0x84, 0x79, 0xec, 0xf6,
- 0x8a, 0x54, 0x1d, 0xe5, 0x3c, 0xff, 0xaf, 0x08, 0x1c, 0xe3, 0xb2, 0x1f, 0x95, 0x11, 0xde, 0xd9,
- 0x39, 0xab, 0xe8, 0x50, 0x9a, 0xa4, 0xd9, 0x8a, 0x76, 0x16, 0xdd, 0x40, 0xf4, 0x38, 0x93, 0xe6,
- 0x92, 0xc0, 0x49, 0xd3, 0x94, 0x10, 0xac, 0xe8, 0xa0, 0x6d, 0x98, 0xda, 0x60, 0x11, 0x9f, 0xb4,
- 0x9c, 0xd2, 0xe2, 0x5c, 0xc8, 0xdc, 0xb7, 0x97, 0x17, 0x96, 0xf2, 0x13, 0x50, 0xf3, 0xc7, 0x5f,
- 0x0a, 0x05, 0xa7, 0x9b, 0xa0, 0x5b, 0xe3, 0xa8, 0x73, 0x27, 0x5c, 0x6a, 0x38, 0x61, 0xe4, 0xd6,
- 0xe6, 0x1b, 0x7e, 0x6d, 0xab, 0x1a, 0xf9, 0x81, 0x4c, 0x16, 0x99, 0xf9, 0xf6, 0x9a, 0xbb, 0x55,
- 0x4d, 0xe1, 0x1b, 0xcd, 0x4f, 0xef, 0xed, 0x96, 0x8e, 0x66, 0x61, 0xe1, 0xcc, 0xb6, 0xd0, 0x2a,
- 0x0c, 0x6e, 0xb8, 0x11, 0x26, 0x2d, 0x5f, 0x9c, 0x16, 0x99, 0x47, 0xe1, 0x65, 0x8e, 0x62, 0xb4,
- 0xc4, 0x22, 0x52, 0x09, 0x00, 0x96, 0x44, 0xd0, 0x9b, 0xea, 0x12, 0x18, 0xc8, 0x17, 0xc0, 0xa6,
- 0x6d, 0xef, 0x32, 0xaf, 0x81, 0xd7, 0xa1, 0xe8, 0xad, 0x87, 0x9d, 0x62, 0xf1, 0xac, 0x2e, 0x1b,
- 0xf2, 0xb3, 0xf9, 0x41, 0xfa, 0x34, 0x5e, 0x5d, 0xae, 0x62, 0x5a, 0x91, 0xb9, 0xbd, 0x86, 0xb5,
- 0xd0, 0x15, 0x89, 0xa7, 0x32, 0xbd, 0x80, 0xcb, 0xd5, 0x85, 0x6a, 0xd9, 0xa0, 0xc1, 0xa2, 0x1a,
- 0xb2, 0x62, 0xcc, 0xab, 0xa3, 0x9b, 0x30, 0xbc, 0xc1, 0x0f, 0xbe, 0xf5, 0x50, 0x24, 0xb3, 0xcf,
- 0xbc, 0x8c, 0x2e, 0x4b, 0x24, 0x83, 0x1e, 0xbb, 0x32, 0x14, 0x08, 0xc7, 0xa4, 0xd0, 0x17, 0x2c,
- 0x38, 0xd6, 0x4a, 0x48, 0x50, 0x99, 0xb3, 0x9a, 0x30, 0x53, 0xcb, 0x74, 0x00, 0xa8, 0x64, 0x55,
- 0x30, 0x1a, 0x64, 0xea, 0x97, 0x4c, 0x34, 0x9c, 0xdd, 0x1c, 0x1d, 0xe8, 0xe0, 0x76, 0xbd, 0x53,
- 0xae, 0xa2, 0x44, 0x60, 0x22, 0x3e, 0xd0, 0x78, 0x7e, 0x11, 0xd3, 0x8a, 0x68, 0x0d, 0x60, 0xbd,
- 0x41, 0x44, 0xc4, 0x47, 0x61, 0x14, 0x95, 0x79, 0xfb, 0x2f, 0x2b, 0x2c, 0x41, 0x87, 0xbd, 0x44,
- 0xe3, 0x52, 0xac, 0xd1, 0xa1, 0x4b, 0xa9, 0xe6, 0x7a, 0x75, 0x12, 0x30, 0xe5, 0x56, 0xce, 0x52,
- 0x5a, 0x60, 0x18, 0xe9, 0xa5, 0xc4, 0xcb, 0xb1, 0xa0, 0xc0, 0x68, 0x91, 0xd6, 0xe6, 0x7a, 0xd8,
- 0x29, 0x2b, 0xc6, 0x02, 0x69, 0x6d, 0x26, 0x16, 0x14, 0xa7, 0xc5, 0xca, 0xb1, 0xa0, 0x40, 0xb7,
- 0xcc, 0x3a, 0xdd, 0x40, 0x24, 0x98, 0x9e, 0xc8, 0xdf, 0x32, 0xcb, 0x1c, 0x25, 0xbd, 0x65, 0x04,
- 0x00, 0x4b, 0x22, 0xe8, 0xd3, 0x26, 0xb7, 0x33, 0xc9, 0x68, 0x3e, 0xd3, 0x85, 0xdb, 0x31, 0xe8,
- 0x76, 0xe6, 0x77, 0x5e, 0x81, 0xc2, 0x7a, 0x8d, 0x29, 0xc5, 0x72, 0x74, 0x06, 0xcb, 0x0b, 0x06,
- 0x35, 0x16, 0x65, 0x7e, 0x79, 0x01, 0x17, 0xd6, 0x6b, 0x74, 0xe9, 0x3b, 0xf7, 0xda, 0x01, 0x59,
- 0x76, 0x1b, 0x44, 0x64, 0xc8, 0xc8, 0x5c, 0xfa, 0x73, 0x12, 0x29, 0xbd, 0xf4, 0x15, 0x08, 0xc7,
- 0xa4, 0x28, 0xdd, 0x98, 0x07, 0x3b, 0x92, 0x4f, 0x57, 0xb1, 0x5a, 0x69, 0xba, 0x99, 0x5c, 0xd8,
- 0x16, 0x8c, 0x6d, 0x87, 0xad, 0x4d, 0x22, 0x4f, 0x45, 0xa6, 0xae, 0xcb, 0x89, 0x54, 0x71, 0x53,
- 0x20, 0xba, 0x41, 0xd4, 0x76, 0x1a, 0xa9, 0x83, 0x9c, 0x89, 0x56, 0x6e, 0xea, 0xc4, 0xb0, 0x49,
- 0x9b, 0x2e, 0x84, 0x77, 0x79, 0x38, 0x39, 0xa6, 0xb8, 0xcb, 0x59, 0x08, 0x19, 0x11, 0xe7, 0xf8,
- 0x42, 0x10, 0x00, 0x2c, 0x89, 0xa8, 0xc1, 0x66, 0x17, 0xd0, 0xf1, 0x2e, 0x83, 0x9d, 0xea, 0x6f,
- 0x3c, 0xd8, 0xec, 0xc2, 0x89, 0x49, 0xb1, 0x8b, 0xa6, 0xb5, 0xe9, 0x47, 0xbe, 0x97, 0xb8, 0xe4,
- 0x4e, 0xe4, 0x5f, 0x34, 0x95, 0x0c, 0xfc, 0xf4, 0x45, 0x93, 0x85, 0x85, 0x33, 0xdb, 0xa2, 0x1f,
- 0xd7, 0x92, 0x91, 0x01, 0x45, 0x16, 0x8f, 0xa7, 0x72, 0x02, 0x6b, 0xa6, 0xc3, 0x07, 0xf2, 0x8f,
- 0x53, 0x20, 0x1c, 0x93, 0x42, 0x75, 0x18, 0x6f, 0x19, 0x11, 0x67, 0x59, 0x36, 0x92, 0x1c, 0xbe,
- 0x20, 0x2b, 0x36, 0x2d, 0x97, 0x10, 0x99, 0x10, 0x9c, 0xa0, 0xc9, 0x2c, 0xf7, 0xb8, 0xab, 0x1f,
- 0x4b, 0x56, 0x92, 0x33, 0xd5, 0x19, 0xde, 0x80, 0x7c, 0xaa, 0x05, 0x00, 0x4b, 0x22, 0x74, 0x34,
- 0x84, 0x83, 0x9a, 0x1f, 0xb2, 0x9c, 0x3f, 0x79, 0x0a, 0xf6, 0x2c, 0x35, 0x91, 0x0c, 0xb3, 0x2e,
- 0x40, 0x38, 0x26, 0x45, 0x4f, 0x72, 0x7a, 0xe1, 0x9d, 0xca, 0x3f, 0xc9, 0x93, 0xd7, 0x1d, 0x3b,
- 0xc9, 0xe9, 0x65, 0x57, 0x14, 0x57, 0x9d, 0x8a, 0x0a, 0xce, 0xf2, 0x95, 0xe4, 0xf4, 0x4b, 0x85,
- 0x15, 0x4f, 0xf7, 0x4b, 0x81, 0x70, 0x4c, 0x8a, 0x5d, 0xc5, 0x2c, 0x34, 0xdd, 0x99, 0x0e, 0x57,
- 0x31, 0x45, 0xc8, 0xb8, 0x8a, 0xb5, 0xd0, 0x75, 0xf6, 0x8f, 0x17, 0xe0, 0x4c, 0xe7, 0x7d, 0x1b,
- 0xeb, 0xd0, 0x2a, 0xb1, 0xcd, 0x52, 0x42, 0x87, 0xc6, 0x25, 0x3a, 0x31, 0x56, 0xcf, 0x01, 0x87,
- 0x2f, 0xc3, 0x94, 0x72, 0x47, 0x6c, 0xb8, 0xb5, 0x1d, 0x2d, 0x49, 0xa9, 0x0a, 0xcd, 0x53, 0x4d,
- 0x22, 0xe0, 0x74, 0x1d, 0x34, 0x07, 0x13, 0x46, 0x61, 0x79, 0x51, 0x3c, 0xff, 0xe3, 0x4c, 0x1b,
- 0x26, 0x18, 0x27, 0xf1, 0xed, 0xdf, 0xb0, 0xe0, 0x44, 0x4e, 0xfe, 0xfb, 0x9e, 0xe3, 0xe9, 0xae,
- 0xc3, 0x44, 0xcb, 0xac, 0xda, 0x25, 0x04, 0xb8, 0x91, 0x65, 0x5f, 0xf5, 0x35, 0x01, 0xc0, 0x49,
- 0xa2, 0xf6, 0xaf, 0x15, 0xe0, 0x74, 0x47, 0xfb, 0x7a, 0x84, 0xe1, 0xf8, 0x46, 0x33, 0x74, 0x16,
- 0x02, 0x52, 0x27, 0x5e, 0xe4, 0x3a, 0x8d, 0x6a, 0x8b, 0xd4, 0x34, 0x2d, 0x28, 0x33, 0x54, 0xbf,
- 0xbc, 0x52, 0x9d, 0x4b, 0x63, 0xe0, 0x9c, 0x9a, 0x68, 0x19, 0x50, 0x1a, 0x22, 0x66, 0x98, 0x3d,
- 0x71, 0xd3, 0xf4, 0x70, 0x46, 0x0d, 0xf4, 0x32, 0x8c, 0x29, 0xbb, 0x7d, 0x6d, 0xc6, 0xd9, 0x05,
- 0x81, 0x75, 0x00, 0x36, 0xf1, 0xd0, 0x45, 0x9e, 0x82, 0x49, 0x24, 0xeb, 0x12, 0x2a, 0xd3, 0x09,
- 0x99, 0x5f, 0x49, 0x14, 0x63, 0x1d, 0x67, 0xfe, 0xd2, 0x5f, 0x7c, 0xeb, 0xcc, 0x87, 0xfe, 0xea,
- 0x5b, 0x67, 0x3e, 0xf4, 0xb7, 0xdf, 0x3a, 0xf3, 0xa1, 0x1f, 0xda, 0x3b, 0x63, 0xfd, 0xc5, 0xde,
- 0x19, 0xeb, 0xaf, 0xf6, 0xce, 0x58, 0x7f, 0xbb, 0x77, 0xc6, 0xfa, 0xdf, 0xf7, 0xce, 0x58, 0x5f,
- 0xfe, 0x3f, 0xce, 0x7c, 0xe8, 0x6d, 0x14, 0x47, 0xa8, 0xbe, 0x40, 0x67, 0xe7, 0xc2, 0xf6, 0xc5,
- 0xff, 0x10, 0x00, 0x00, 0xff, 0xff, 0xf5, 0xf1, 0x8c, 0x4c, 0x2d, 0x26, 0x01, 0x00,
+ // 16415 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x6b, 0x90, 0x1c, 0xc9,
+ 0x79, 0x20, 0xc6, 0xea, 0x9e, 0xe7, 0x37, 0xef, 0x1c, 0x3c, 0x1a, 0xb3, 0x00, 0x1a, 0x5b, 0xbb,
+ 0x8b, 0xc5, 0xbe, 0x06, 0xc4, 0x3e, 0xb8, 0xe0, 0xee, 0x72, 0xb5, 0xf3, 0x04, 0x66, 0x81, 0x19,
+ 0xf4, 0x66, 0x0f, 0x00, 0x72, 0xb9, 0xe4, 0xb1, 0xd0, 0x9d, 0x33, 0x53, 0x9c, 0xee, 0xaa, 0xde,
+ 0xaa, 0xea, 0x01, 0x06, 0xe6, 0x85, 0x74, 0x3c, 0x8b, 0x77, 0xa4, 0xe4, 0x08, 0xc6, 0x85, 0xef,
+ 0xce, 0x41, 0x5d, 0xdc, 0x8f, 0xf3, 0xdd, 0x49, 0x32, 0xad, 0xb3, 0x68, 0xc9, 0x92, 0x2c, 0xea,
+ 0x65, 0x5b, 0x8e, 0x90, 0xfd, 0x43, 0x96, 0x14, 0x61, 0x52, 0x61, 0x85, 0x47, 0xe6, 0xd8, 0x0a,
+ 0x05, 0x7f, 0x58, 0x52, 0xc8, 0xfe, 0x61, 0x4f, 0xc8, 0xe6, 0x45, 0x3e, 0x2b, 0xb3, 0x1e, 0xdd,
+ 0x3d, 0x58, 0x60, 0xb8, 0x64, 0xec, 0xbf, 0xee, 0xfc, 0xbe, 0xfc, 0x32, 0x2b, 0x9f, 0x5f, 0x7e,
+ 0x4f, 0xb0, 0xb7, 0x2f, 0x87, 0xb3, 0xae, 0x7f, 0xd1, 0x69, 0xb9, 0x17, 0x6b, 0x7e, 0x40, 0x2e,
+ 0xee, 0x5c, 0xba, 0xb8, 0x49, 0x3c, 0x12, 0x38, 0x11, 0xa9, 0xcf, 0xb6, 0x02, 0x3f, 0xf2, 0x11,
+ 0xe2, 0x38, 0xb3, 0x4e, 0xcb, 0x9d, 0xa5, 0x38, 0xb3, 0x3b, 0x97, 0x66, 0x5e, 0xd8, 0x74, 0xa3,
+ 0xad, 0xf6, 0x9d, 0xd9, 0x9a, 0xdf, 0xbc, 0xb8, 0xe9, 0x6f, 0xfa, 0x17, 0x19, 0xea, 0x9d, 0xf6,
+ 0x06, 0xfb, 0xc7, 0xfe, 0xb0, 0x5f, 0x9c, 0xc4, 0xcc, 0xcb, 0x71, 0x33, 0x4d, 0xa7, 0xb6, 0xe5,
+ 0x7a, 0x24, 0xd8, 0xbd, 0xd8, 0xda, 0xde, 0x64, 0xed, 0x06, 0x24, 0xf4, 0xdb, 0x41, 0x8d, 0x24,
+ 0x1b, 0xee, 0x58, 0x2b, 0xbc, 0xd8, 0x24, 0x91, 0x93, 0xd1, 0xdd, 0x99, 0x8b, 0x79, 0xb5, 0x82,
+ 0xb6, 0x17, 0xb9, 0xcd, 0x74, 0x33, 0x9f, 0xe8, 0x56, 0x21, 0xac, 0x6d, 0x91, 0xa6, 0x93, 0xaa,
+ 0xf7, 0x52, 0x5e, 0xbd, 0x76, 0xe4, 0x36, 0x2e, 0xba, 0x5e, 0x14, 0x46, 0x41, 0xb2, 0x92, 0xfd,
+ 0x5d, 0x0b, 0xce, 0xcd, 0xdd, 0xae, 0x2e, 0x35, 0x9c, 0x30, 0x72, 0x6b, 0xf3, 0x0d, 0xbf, 0xb6,
+ 0x5d, 0x8d, 0xfc, 0x80, 0xdc, 0xf2, 0x1b, 0xed, 0x26, 0xa9, 0xb2, 0x81, 0x40, 0xcf, 0xc3, 0xd0,
+ 0x0e, 0xfb, 0xbf, 0xb2, 0x58, 0xb2, 0xce, 0x59, 0x17, 0x86, 0xe7, 0x27, 0xff, 0x60, 0xaf, 0xfc,
+ 0xb1, 0xfd, 0xbd, 0xf2, 0xd0, 0x2d, 0x51, 0x8e, 0x15, 0x06, 0x3a, 0x0f, 0x03, 0x1b, 0xe1, 0xfa,
+ 0x6e, 0x8b, 0x94, 0x0a, 0x0c, 0x77, 0x5c, 0xe0, 0x0e, 0x2c, 0x57, 0x69, 0x29, 0x16, 0x50, 0x74,
+ 0x11, 0x86, 0x5b, 0x4e, 0x10, 0xb9, 0x91, 0xeb, 0x7b, 0xa5, 0xe2, 0x39, 0xeb, 0x42, 0xff, 0xfc,
+ 0x94, 0x40, 0x1d, 0xae, 0x48, 0x00, 0x8e, 0x71, 0x68, 0x37, 0x02, 0xe2, 0xd4, 0x6f, 0x78, 0x8d,
+ 0xdd, 0x52, 0xdf, 0x39, 0xeb, 0xc2, 0x50, 0xdc, 0x0d, 0x2c, 0xca, 0xb1, 0xc2, 0xb0, 0xbf, 0x51,
+ 0x80, 0xa1, 0xb9, 0x8d, 0x0d, 0xd7, 0x73, 0xa3, 0x5d, 0x74, 0x0b, 0x46, 0x3d, 0xbf, 0x4e, 0xe4,
+ 0x7f, 0xf6, 0x15, 0x23, 0x2f, 0x9e, 0x9b, 0x4d, 0x2f, 0xa5, 0xd9, 0x35, 0x0d, 0x6f, 0x7e, 0x72,
+ 0x7f, 0xaf, 0x3c, 0xaa, 0x97, 0x60, 0x83, 0x0e, 0xc2, 0x30, 0xd2, 0xf2, 0xeb, 0x8a, 0x6c, 0x81,
+ 0x91, 0x2d, 0x67, 0x91, 0xad, 0xc4, 0x68, 0xf3, 0x13, 0xfb, 0x7b, 0xe5, 0x11, 0xad, 0x00, 0xeb,
+ 0x44, 0xd0, 0x1d, 0x98, 0xa0, 0x7f, 0xbd, 0xc8, 0x55, 0x74, 0x8b, 0x8c, 0xee, 0x13, 0x79, 0x74,
+ 0x35, 0xd4, 0xf9, 0xe9, 0xfd, 0xbd, 0xf2, 0x44, 0xa2, 0x10, 0x27, 0x09, 0xda, 0x3f, 0x6b, 0xc1,
+ 0xc4, 0x5c, 0xab, 0x35, 0x17, 0x34, 0xfd, 0xa0, 0x12, 0xf8, 0x1b, 0x6e, 0x83, 0xa0, 0x57, 0xa1,
+ 0x2f, 0xa2, 0xb3, 0xc6, 0x67, 0xf8, 0x09, 0x31, 0xb4, 0x7d, 0x74, 0xae, 0x0e, 0xf6, 0xca, 0xd3,
+ 0x09, 0x74, 0x36, 0x95, 0xac, 0x02, 0x7a, 0x0b, 0x26, 0x1b, 0x7e, 0xcd, 0x69, 0x6c, 0xf9, 0x61,
+ 0x24, 0xa0, 0x62, 0xea, 0x8f, 0xed, 0xef, 0x95, 0x27, 0xaf, 0x27, 0x60, 0x38, 0x85, 0x6d, 0xdf,
+ 0x87, 0xf1, 0xb9, 0x28, 0x72, 0x6a, 0x5b, 0xa4, 0xce, 0x17, 0x14, 0x7a, 0x19, 0xfa, 0x3c, 0xa7,
+ 0x29, 0x3b, 0x73, 0x4e, 0x76, 0x66, 0xcd, 0x69, 0xd2, 0xce, 0x4c, 0xde, 0xf4, 0xdc, 0xf7, 0xdb,
+ 0x62, 0x91, 0xd2, 0x32, 0xcc, 0xb0, 0xd1, 0x8b, 0x00, 0x75, 0xb2, 0xe3, 0xd6, 0x48, 0xc5, 0x89,
+ 0xb6, 0x44, 0x1f, 0x90, 0xa8, 0x0b, 0x8b, 0x0a, 0x82, 0x35, 0x2c, 0xfb, 0x1e, 0x0c, 0xcf, 0xed,
+ 0xf8, 0x6e, 0xbd, 0xe2, 0xd7, 0x43, 0xb4, 0x0d, 0x13, 0xad, 0x80, 0x6c, 0x90, 0x40, 0x15, 0x95,
+ 0xac, 0x73, 0xc5, 0x0b, 0x23, 0x2f, 0x5e, 0xc8, 0x1c, 0x7b, 0x13, 0x75, 0xc9, 0x8b, 0x82, 0xdd,
+ 0xf9, 0x93, 0xa2, 0xbd, 0x89, 0x04, 0x14, 0x27, 0x29, 0xdb, 0xbf, 0x5f, 0x80, 0xe3, 0x73, 0xf7,
+ 0xdb, 0x01, 0x59, 0x74, 0xc3, 0xed, 0xe4, 0x86, 0xab, 0xbb, 0xe1, 0xf6, 0x5a, 0x3c, 0x02, 0x6a,
+ 0xa5, 0x2f, 0x8a, 0x72, 0xac, 0x30, 0xd0, 0x0b, 0x30, 0x48, 0x7f, 0xdf, 0xc4, 0x2b, 0xe2, 0x93,
+ 0xa7, 0x05, 0xf2, 0xc8, 0xa2, 0x13, 0x39, 0x8b, 0x1c, 0x84, 0x25, 0x0e, 0x5a, 0x85, 0x91, 0x1a,
+ 0x3b, 0x1f, 0x36, 0x57, 0xfd, 0x3a, 0x61, 0x6b, 0x6b, 0x78, 0xfe, 0x39, 0x8a, 0xbe, 0x10, 0x17,
+ 0x1f, 0xec, 0x95, 0x4b, 0xbc, 0x6f, 0x82, 0x84, 0x06, 0xc3, 0x7a, 0x7d, 0x64, 0xab, 0xed, 0xde,
+ 0xc7, 0x28, 0x41, 0xc6, 0x56, 0xbf, 0xa0, 0xed, 0xdc, 0x7e, 0xb6, 0x73, 0x47, 0xb3, 0x77, 0x2d,
+ 0xba, 0x04, 0x7d, 0xdb, 0xae, 0x57, 0x2f, 0x0d, 0x30, 0x5a, 0x67, 0xe8, 0x9c, 0x5f, 0x73, 0xbd,
+ 0xfa, 0xc1, 0x5e, 0x79, 0xca, 0xe8, 0x0e, 0x2d, 0xc4, 0x0c, 0xd5, 0xfe, 0xbf, 0x2c, 0x28, 0x33,
+ 0xd8, 0xb2, 0xdb, 0x20, 0x15, 0x12, 0x84, 0x6e, 0x18, 0x11, 0x2f, 0x32, 0x06, 0xf4, 0x45, 0x80,
+ 0x90, 0xd4, 0x02, 0x12, 0x69, 0x43, 0xaa, 0x16, 0x46, 0x55, 0x41, 0xb0, 0x86, 0x45, 0xcf, 0xa7,
+ 0x70, 0xcb, 0x09, 0xd8, 0xfa, 0x12, 0x03, 0xab, 0xce, 0xa7, 0xaa, 0x04, 0xe0, 0x18, 0xc7, 0x38,
+ 0x9f, 0x8a, 0xdd, 0xce, 0x27, 0xf4, 0x29, 0x98, 0x88, 0x1b, 0x0b, 0x5b, 0x4e, 0x4d, 0x0e, 0x20,
+ 0xdb, 0xc1, 0x55, 0x13, 0x84, 0x93, 0xb8, 0xf6, 0x7f, 0x66, 0x89, 0xc5, 0x43, 0xbf, 0xfa, 0x43,
+ 0xfe, 0xad, 0xf6, 0x6f, 0x58, 0x30, 0x38, 0xef, 0x7a, 0x75, 0xd7, 0xdb, 0x44, 0x5f, 0x80, 0x21,
+ 0x7a, 0x55, 0xd6, 0x9d, 0xc8, 0x11, 0xc7, 0xf0, 0xc7, 0xb5, 0xbd, 0xa5, 0x6e, 0xae, 0xd9, 0xd6,
+ 0xf6, 0x26, 0x2d, 0x08, 0x67, 0x29, 0x36, 0xdd, 0x6d, 0x37, 0xee, 0x7c, 0x91, 0xd4, 0xa2, 0x55,
+ 0x12, 0x39, 0xf1, 0xe7, 0xc4, 0x65, 0x58, 0x51, 0x45, 0xd7, 0x60, 0x20, 0x72, 0x82, 0x4d, 0x12,
+ 0x89, 0xf3, 0x38, 0xf3, 0xdc, 0xe4, 0x35, 0x31, 0xdd, 0x91, 0xc4, 0xab, 0x91, 0xf8, 0x96, 0x5a,
+ 0x67, 0x55, 0xb1, 0x20, 0x61, 0xff, 0x7f, 0x83, 0x70, 0x6a, 0xa1, 0xba, 0x92, 0xb3, 0xae, 0xce,
+ 0xc3, 0x40, 0x3d, 0x70, 0x77, 0x48, 0x20, 0xc6, 0x59, 0x51, 0x59, 0x64, 0xa5, 0x58, 0x40, 0xd1,
+ 0x65, 0x18, 0xe5, 0xf7, 0xe3, 0x55, 0xc7, 0xab, 0xc7, 0xc7, 0xa3, 0xc0, 0x1e, 0xbd, 0xa5, 0xc1,
+ 0xb0, 0x81, 0x79, 0xc8, 0x45, 0x75, 0x3e, 0xb1, 0x19, 0xf3, 0xee, 0xde, 0xaf, 0x5a, 0x30, 0xc9,
+ 0x9b, 0x99, 0x8b, 0xa2, 0xc0, 0xbd, 0xd3, 0x8e, 0x48, 0x58, 0xea, 0x67, 0x27, 0xdd, 0x42, 0xd6,
+ 0x68, 0xe5, 0x8e, 0xc0, 0xec, 0xad, 0x04, 0x15, 0x7e, 0x08, 0x96, 0x44, 0xbb, 0x93, 0x49, 0x30,
+ 0x4e, 0x35, 0x8b, 0xfe, 0xa1, 0x05, 0x33, 0x35, 0xdf, 0x8b, 0x02, 0xbf, 0xd1, 0x20, 0x41, 0xa5,
+ 0x7d, 0xa7, 0xe1, 0x86, 0x5b, 0x7c, 0x9d, 0x62, 0xb2, 0xc1, 0x4e, 0x82, 0x9c, 0x39, 0x54, 0x48,
+ 0x62, 0x0e, 0xcf, 0xee, 0xef, 0x95, 0x67, 0x16, 0x72, 0x49, 0xe1, 0x0e, 0xcd, 0xa0, 0x6d, 0x40,
+ 0xf4, 0x66, 0xaf, 0x46, 0xce, 0x26, 0x89, 0x1b, 0x1f, 0xec, 0xbd, 0xf1, 0x13, 0xfb, 0x7b, 0x65,
+ 0xb4, 0x96, 0x22, 0x81, 0x33, 0xc8, 0xa2, 0xf7, 0xe1, 0x18, 0x2d, 0x4d, 0x7d, 0xeb, 0x50, 0xef,
+ 0xcd, 0x95, 0xf6, 0xf7, 0xca, 0xc7, 0xd6, 0x32, 0x88, 0xe0, 0x4c, 0xd2, 0xe8, 0xa7, 0x2c, 0x38,
+ 0x15, 0x7f, 0xfe, 0xd2, 0xbd, 0x96, 0xe3, 0xd5, 0xe3, 0x86, 0x87, 0x7b, 0x6f, 0x98, 0x9e, 0xc9,
+ 0xa7, 0x16, 0xf2, 0x28, 0xe1, 0xfc, 0x46, 0x90, 0x07, 0xd3, 0xb4, 0x6b, 0xc9, 0xb6, 0xa1, 0xf7,
+ 0xb6, 0x4f, 0xee, 0xef, 0x95, 0xa7, 0xd7, 0xd2, 0x34, 0x70, 0x16, 0xe1, 0x99, 0x05, 0x38, 0x9e,
+ 0xb9, 0x3a, 0xd1, 0x24, 0x14, 0xb7, 0x09, 0x67, 0x02, 0x87, 0x31, 0xfd, 0x89, 0x8e, 0x41, 0xff,
+ 0x8e, 0xd3, 0x68, 0x8b, 0x8d, 0x89, 0xf9, 0x9f, 0xd7, 0x0a, 0x97, 0x2d, 0xfb, 0xbf, 0x2f, 0xc2,
+ 0xc4, 0x42, 0x75, 0xe5, 0x81, 0x76, 0xbd, 0x7e, 0xed, 0x15, 0x3a, 0x5e, 0x7b, 0xf1, 0x25, 0x5a,
+ 0xcc, 0xbd, 0x44, 0x7f, 0x32, 0x63, 0xcb, 0xf6, 0xb1, 0x2d, 0xfb, 0xc9, 0x9c, 0x2d, 0xfb, 0x90,
+ 0x37, 0xea, 0x4e, 0xce, 0xaa, 0xed, 0x67, 0x13, 0x98, 0xc9, 0x21, 0x31, 0xde, 0x2f, 0x79, 0xd4,
+ 0x1e, 0x72, 0xe9, 0x3e, 0x9c, 0x79, 0xac, 0xc1, 0xe8, 0x82, 0xd3, 0x72, 0xee, 0xb8, 0x0d, 0x37,
+ 0x72, 0x49, 0x88, 0x9e, 0x86, 0xa2, 0x53, 0xaf, 0x33, 0xee, 0x6e, 0x78, 0xfe, 0xf8, 0xfe, 0x5e,
+ 0xb9, 0x38, 0x57, 0xa7, 0x6c, 0x06, 0x28, 0xac, 0x5d, 0x4c, 0x31, 0xd0, 0xb3, 0xd0, 0x57, 0x0f,
+ 0xfc, 0x56, 0xa9, 0xc0, 0x30, 0xe9, 0x2e, 0xef, 0x5b, 0x0c, 0xfc, 0x56, 0x02, 0x95, 0xe1, 0xd8,
+ 0xbf, 0x57, 0x80, 0xd3, 0x0b, 0xa4, 0xb5, 0xb5, 0x5c, 0xcd, 0xb9, 0x2f, 0x2e, 0xc0, 0x50, 0xd3,
+ 0xf7, 0xdc, 0xc8, 0x0f, 0x42, 0xd1, 0x34, 0x5b, 0x11, 0xab, 0xa2, 0x0c, 0x2b, 0x28, 0x3a, 0x07,
+ 0x7d, 0xad, 0x98, 0x89, 0x1d, 0x95, 0x0c, 0x30, 0x63, 0x5f, 0x19, 0x84, 0x62, 0xb4, 0x43, 0x12,
+ 0x88, 0x15, 0xa3, 0x30, 0x6e, 0x86, 0x24, 0xc0, 0x0c, 0x12, 0x73, 0x02, 0x94, 0x47, 0x10, 0x37,
+ 0x42, 0x82, 0x13, 0xa0, 0x10, 0xac, 0x61, 0xa1, 0x0a, 0x0c, 0x87, 0x89, 0x99, 0xed, 0x69, 0x6b,
+ 0x8e, 0x31, 0x56, 0x41, 0xcd, 0x64, 0x4c, 0xc4, 0xb8, 0xc1, 0x06, 0xba, 0xb2, 0x0a, 0xdf, 0x2e,
+ 0x00, 0xe2, 0x43, 0xf8, 0x23, 0x36, 0x70, 0x37, 0xd3, 0x03, 0xd7, 0xfb, 0x96, 0x78, 0x58, 0xa3,
+ 0xf7, 0x7f, 0x5b, 0x70, 0x7a, 0xc1, 0xf5, 0xea, 0x24, 0xc8, 0x59, 0x80, 0x8f, 0xe6, 0x29, 0x7f,
+ 0x38, 0x26, 0xc5, 0x58, 0x62, 0x7d, 0x0f, 0x61, 0x89, 0xd9, 0x7f, 0x6d, 0x01, 0xe2, 0x9f, 0xfd,
+ 0xa1, 0xfb, 0xd8, 0x9b, 0xe9, 0x8f, 0x7d, 0x08, 0xcb, 0xc2, 0xbe, 0x0e, 0xe3, 0x0b, 0x0d, 0x97,
+ 0x78, 0xd1, 0x4a, 0x65, 0xc1, 0xf7, 0x36, 0xdc, 0x4d, 0xf4, 0x1a, 0x8c, 0x47, 0x6e, 0x93, 0xf8,
+ 0xed, 0xa8, 0x4a, 0x6a, 0xbe, 0xc7, 0x5e, 0xae, 0xd6, 0x85, 0xfe, 0x79, 0xb4, 0xbf, 0x57, 0x1e,
+ 0x5f, 0x37, 0x20, 0x38, 0x81, 0x69, 0xff, 0x5b, 0x7a, 0x6e, 0x35, 0xda, 0x61, 0x44, 0x82, 0xf5,
+ 0xa0, 0x1d, 0x46, 0xf3, 0x6d, 0xca, 0x7b, 0x56, 0x02, 0x9f, 0x76, 0xc7, 0xf5, 0x3d, 0x74, 0xda,
+ 0x78, 0x8e, 0x0f, 0xc9, 0xa7, 0xb8, 0x78, 0x76, 0xcf, 0x02, 0x84, 0xee, 0xa6, 0x47, 0x02, 0xed,
+ 0xf9, 0x30, 0xce, 0xb6, 0x8a, 0x2a, 0xc5, 0x1a, 0x06, 0x6a, 0xc0, 0x58, 0xc3, 0xb9, 0x43, 0x1a,
+ 0x55, 0xd2, 0x20, 0xb5, 0xc8, 0x0f, 0x84, 0x7c, 0xe3, 0xa5, 0xde, 0xde, 0x01, 0xd7, 0xf5, 0xaa,
+ 0xf3, 0x53, 0xfb, 0x7b, 0xe5, 0x31, 0xa3, 0x08, 0x9b, 0xc4, 0xe9, 0xd1, 0xe1, 0xb7, 0xe8, 0x57,
+ 0x38, 0x0d, 0xfd, 0xf1, 0x79, 0x43, 0x94, 0x61, 0x05, 0x55, 0x47, 0x47, 0x5f, 0xde, 0xd1, 0x61,
+ 0xff, 0x19, 0x5d, 0x68, 0x7e, 0xb3, 0xe5, 0x7b, 0xc4, 0x8b, 0x16, 0x7c, 0xaf, 0xce, 0x25, 0x53,
+ 0xaf, 0x19, 0xa2, 0x93, 0xf3, 0x09, 0xd1, 0xc9, 0x89, 0x74, 0x0d, 0x4d, 0x7a, 0xf2, 0x49, 0x18,
+ 0x08, 0x23, 0x27, 0x6a, 0x87, 0x62, 0xe0, 0x1e, 0x97, 0xcb, 0xae, 0xca, 0x4a, 0x0f, 0xf6, 0xca,
+ 0x13, 0xaa, 0x1a, 0x2f, 0xc2, 0xa2, 0x02, 0x7a, 0x06, 0x06, 0x9b, 0x24, 0x0c, 0x9d, 0x4d, 0xc9,
+ 0x36, 0x4c, 0x88, 0xba, 0x83, 0xab, 0xbc, 0x18, 0x4b, 0x38, 0x7a, 0x02, 0xfa, 0x49, 0x10, 0xf8,
+ 0x81, 0xf8, 0xb6, 0x31, 0x81, 0xd8, 0xbf, 0x44, 0x0b, 0x31, 0x87, 0xd9, 0xff, 0x93, 0x05, 0x13,
+ 0xaa, 0xaf, 0xbc, 0xad, 0x23, 0x78, 0xae, 0xbd, 0x0b, 0x50, 0x93, 0x1f, 0x18, 0xb2, 0x6b, 0x76,
+ 0xe4, 0xc5, 0xf3, 0x99, 0x1c, 0x4d, 0x6a, 0x18, 0x63, 0xca, 0xaa, 0x28, 0xc4, 0x1a, 0x35, 0xfb,
+ 0xb7, 0x2d, 0x98, 0x4e, 0x7c, 0xd1, 0x75, 0x37, 0x8c, 0xd0, 0x7b, 0xa9, 0xaf, 0x9a, 0xed, 0x71,
+ 0xf1, 0xb9, 0x21, 0xff, 0x26, 0xb5, 0xe7, 0x65, 0x89, 0xf6, 0x45, 0x57, 0xa1, 0xdf, 0x8d, 0x48,
+ 0x53, 0x7e, 0xcc, 0x13, 0x1d, 0x3f, 0x86, 0xf7, 0x2a, 0x9e, 0x91, 0x15, 0x5a, 0x13, 0x73, 0x02,
+ 0xf6, 0xef, 0x15, 0x61, 0x98, 0xef, 0xef, 0x55, 0xa7, 0x75, 0x04, 0x73, 0xf1, 0x1c, 0x0c, 0xbb,
+ 0xcd, 0x66, 0x3b, 0x72, 0xee, 0x88, 0x7b, 0x6f, 0x88, 0x9f, 0x41, 0x2b, 0xb2, 0x10, 0xc7, 0x70,
+ 0xb4, 0x02, 0x7d, 0xac, 0x2b, 0xfc, 0x2b, 0x9f, 0xce, 0xfe, 0x4a, 0xd1, 0xf7, 0xd9, 0x45, 0x27,
+ 0x72, 0x38, 0xcb, 0xa9, 0xf6, 0x15, 0x2d, 0xc2, 0x8c, 0x04, 0x72, 0x00, 0xee, 0xb8, 0x9e, 0x13,
+ 0xec, 0xd2, 0xb2, 0x52, 0x91, 0x11, 0x7c, 0xa1, 0x33, 0xc1, 0x79, 0x85, 0xcf, 0xc9, 0xaa, 0x0f,
+ 0x8b, 0x01, 0x58, 0x23, 0x3a, 0xf3, 0x2a, 0x0c, 0x2b, 0xe4, 0xc3, 0x70, 0x8e, 0x33, 0x9f, 0x82,
+ 0x89, 0x44, 0x5b, 0xdd, 0xaa, 0x8f, 0xea, 0x8c, 0xe7, 0x6f, 0xb2, 0x23, 0x43, 0xf4, 0x7a, 0xc9,
+ 0xdb, 0x11, 0x77, 0xd3, 0x7d, 0x38, 0xd6, 0xc8, 0x38, 0xf2, 0xc5, 0xbc, 0xf6, 0x7e, 0x45, 0x9c,
+ 0x16, 0x9f, 0x7d, 0x2c, 0x0b, 0x8a, 0x33, 0xdb, 0x30, 0x4e, 0xc4, 0x42, 0xa7, 0x13, 0x91, 0x9e,
+ 0x77, 0xc7, 0x54, 0xe7, 0xaf, 0x91, 0x5d, 0x75, 0xa8, 0xfe, 0x30, 0xbb, 0x7f, 0x86, 0x8f, 0x3e,
+ 0x3f, 0x2e, 0x47, 0x04, 0x81, 0xe2, 0x35, 0xb2, 0xcb, 0xa7, 0x42, 0xff, 0xba, 0x62, 0xc7, 0xaf,
+ 0xfb, 0x96, 0x05, 0x63, 0xea, 0xeb, 0x8e, 0xe0, 0x5c, 0x98, 0x37, 0xcf, 0x85, 0x33, 0x1d, 0x17,
+ 0x78, 0xce, 0x89, 0xf0, 0xed, 0x02, 0x9c, 0x52, 0x38, 0xf4, 0x11, 0xc5, 0xff, 0x88, 0x55, 0x75,
+ 0x11, 0x86, 0x3d, 0x25, 0x4e, 0xb4, 0x4c, 0x39, 0x5e, 0x2c, 0x4c, 0x8c, 0x71, 0xe8, 0x95, 0xe7,
+ 0xc5, 0x97, 0xf6, 0xa8, 0x2e, 0x67, 0x17, 0x97, 0xfb, 0x3c, 0x14, 0xdb, 0x6e, 0x5d, 0x5c, 0x30,
+ 0x1f, 0x97, 0xa3, 0x7d, 0x73, 0x65, 0xf1, 0x60, 0xaf, 0xfc, 0x78, 0x9e, 0xca, 0x89, 0xde, 0x6c,
+ 0xe1, 0xec, 0xcd, 0x95, 0x45, 0x4c, 0x2b, 0xa3, 0x39, 0x98, 0x90, 0x5a, 0xb5, 0x5b, 0x94, 0x2f,
+ 0xf5, 0x3d, 0x71, 0x0f, 0x29, 0x61, 0x39, 0x36, 0xc1, 0x38, 0x89, 0x8f, 0x16, 0x61, 0x72, 0xbb,
+ 0x7d, 0x87, 0x34, 0x48, 0xc4, 0x3f, 0xf8, 0x1a, 0xe1, 0xa2, 0xe4, 0xe1, 0xf8, 0x09, 0x7b, 0x2d,
+ 0x01, 0xc7, 0xa9, 0x1a, 0xf6, 0x0f, 0xd8, 0x7d, 0x20, 0x46, 0x4f, 0xe3, 0x6f, 0x7e, 0x98, 0xcb,
+ 0xb9, 0x97, 0x55, 0x71, 0x8d, 0xec, 0xae, 0xfb, 0x94, 0x0f, 0xc9, 0x5e, 0x15, 0xc6, 0x9a, 0xef,
+ 0xeb, 0xb8, 0xe6, 0x7f, 0xb5, 0x00, 0xc7, 0xd5, 0x08, 0x18, 0xdc, 0xf2, 0x8f, 0xfa, 0x18, 0x5c,
+ 0x82, 0x91, 0x3a, 0xd9, 0x70, 0xda, 0x8d, 0x48, 0xe9, 0x35, 0xfa, 0xb9, 0xaa, 0x6d, 0x31, 0x2e,
+ 0xc6, 0x3a, 0xce, 0x21, 0x86, 0xed, 0x97, 0xc6, 0xd8, 0x45, 0x1c, 0x39, 0x74, 0x8d, 0xab, 0x5d,
+ 0x63, 0xe5, 0xee, 0x9a, 0x27, 0xa0, 0xdf, 0x6d, 0x52, 0xc6, 0xac, 0x60, 0xf2, 0x5b, 0x2b, 0xb4,
+ 0x10, 0x73, 0x18, 0x7a, 0x0a, 0x06, 0x6b, 0x7e, 0xb3, 0xe9, 0x78, 0x75, 0x76, 0xe5, 0x0d, 0xcf,
+ 0x8f, 0x50, 0xde, 0x6d, 0x81, 0x17, 0x61, 0x09, 0xa3, 0xcc, 0xb7, 0x13, 0x6c, 0x72, 0x61, 0x8f,
+ 0x60, 0xbe, 0xe7, 0x82, 0xcd, 0x10, 0xb3, 0x52, 0xfa, 0x56, 0xbd, 0xeb, 0x07, 0xdb, 0xae, 0xb7,
+ 0xb9, 0xe8, 0x06, 0x62, 0x4b, 0xa8, 0xbb, 0xf0, 0xb6, 0x82, 0x60, 0x0d, 0x0b, 0x2d, 0x43, 0x7f,
+ 0xcb, 0x0f, 0xa2, 0xb0, 0x34, 0xc0, 0x86, 0xfb, 0xf1, 0x9c, 0x83, 0x88, 0x7f, 0x6d, 0xc5, 0x0f,
+ 0xa2, 0xf8, 0x03, 0xe8, 0xbf, 0x10, 0xf3, 0xea, 0xe8, 0x3a, 0x0c, 0x12, 0x6f, 0x67, 0x39, 0xf0,
+ 0x9b, 0xa5, 0xe9, 0x7c, 0x4a, 0x4b, 0x1c, 0x85, 0x2f, 0xb3, 0x98, 0x47, 0x15, 0xc5, 0x58, 0x92,
+ 0x40, 0x9f, 0x84, 0x22, 0xf1, 0x76, 0x4a, 0x83, 0x8c, 0xd2, 0x4c, 0x0e, 0xa5, 0x5b, 0x4e, 0x10,
+ 0x9f, 0xf9, 0x4b, 0xde, 0x0e, 0xa6, 0x75, 0xd0, 0x67, 0x60, 0x58, 0x1e, 0x18, 0xa1, 0x90, 0xa2,
+ 0x66, 0x2e, 0x58, 0x79, 0xcc, 0x60, 0xf2, 0x7e, 0xdb, 0x0d, 0x48, 0x93, 0x78, 0x51, 0x18, 0x9f,
+ 0x90, 0x12, 0x1a, 0xe2, 0x98, 0x1a, 0xaa, 0xc1, 0x68, 0x40, 0x42, 0xf7, 0x3e, 0xa9, 0xf8, 0x0d,
+ 0xb7, 0xb6, 0x5b, 0x3a, 0xc9, 0xba, 0xf7, 0x4c, 0xc7, 0x21, 0xc3, 0x5a, 0x85, 0x58, 0xca, 0xaf,
+ 0x97, 0x62, 0x83, 0x28, 0x7a, 0x07, 0xc6, 0x02, 0x12, 0x46, 0x4e, 0x10, 0x89, 0x56, 0x4a, 0x4a,
+ 0x2b, 0x37, 0x86, 0x75, 0x00, 0x7f, 0x4e, 0xc4, 0xcd, 0xc4, 0x10, 0x6c, 0x52, 0x40, 0x9f, 0x91,
+ 0x2a, 0x87, 0x55, 0xbf, 0xed, 0x45, 0x61, 0x69, 0x98, 0xf5, 0x3b, 0x53, 0x37, 0x7d, 0x2b, 0xc6,
+ 0x4b, 0xea, 0x24, 0x78, 0x65, 0x6c, 0x90, 0x42, 0x9f, 0x83, 0x31, 0xfe, 0x9f, 0xab, 0x54, 0xc3,
+ 0xd2, 0x71, 0x46, 0xfb, 0x5c, 0x3e, 0x6d, 0x8e, 0x38, 0x7f, 0x5c, 0x10, 0x1f, 0xd3, 0x4b, 0x43,
+ 0x6c, 0x52, 0x43, 0x18, 0xc6, 0x1a, 0xee, 0x0e, 0xf1, 0x48, 0x18, 0x56, 0x02, 0xff, 0x0e, 0x11,
+ 0x12, 0xe2, 0x53, 0xd9, 0x2a, 0x58, 0xff, 0x0e, 0x11, 0x8f, 0x40, 0xbd, 0x0e, 0x36, 0x49, 0xa0,
+ 0x9b, 0x30, 0x4e, 0x9f, 0xe4, 0x6e, 0x4c, 0x74, 0xa4, 0x1b, 0x51, 0xf6, 0x70, 0xc6, 0x46, 0x25,
+ 0x9c, 0x20, 0x82, 0x6e, 0xc0, 0x28, 0x1b, 0xf3, 0x76, 0x8b, 0x13, 0x3d, 0xd1, 0x8d, 0x28, 0x33,
+ 0x28, 0xa8, 0x6a, 0x55, 0xb0, 0x41, 0x00, 0xbd, 0x0d, 0xc3, 0x0d, 0x77, 0x83, 0xd4, 0x76, 0x6b,
+ 0x0d, 0x52, 0x1a, 0x65, 0xd4, 0x32, 0x0f, 0xc3, 0xeb, 0x12, 0x89, 0xf3, 0xe7, 0xea, 0x2f, 0x8e,
+ 0xab, 0xa3, 0x5b, 0x70, 0x22, 0x22, 0x41, 0xd3, 0xf5, 0x1c, 0x7a, 0x88, 0x89, 0x27, 0x21, 0xd3,
+ 0x8c, 0x8f, 0xb1, 0xd5, 0x75, 0x56, 0xcc, 0xc6, 0x89, 0xf5, 0x4c, 0x2c, 0x9c, 0x53, 0x1b, 0xdd,
+ 0x83, 0x52, 0x06, 0x84, 0xaf, 0xdb, 0x63, 0x8c, 0xf2, 0x1b, 0x82, 0x72, 0x69, 0x3d, 0x07, 0xef,
+ 0xa0, 0x03, 0x0c, 0xe7, 0x52, 0x47, 0x37, 0x60, 0x82, 0x9d, 0x9c, 0x95, 0x76, 0xa3, 0x21, 0x1a,
+ 0x1c, 0x67, 0x0d, 0x3e, 0x25, 0xf9, 0x88, 0x15, 0x13, 0x7c, 0xb0, 0x57, 0x86, 0xf8, 0x1f, 0x4e,
+ 0xd6, 0x46, 0x77, 0x98, 0x12, 0xb6, 0x1d, 0xb8, 0xd1, 0x2e, 0xdd, 0x55, 0xe4, 0x5e, 0x54, 0x9a,
+ 0xe8, 0x28, 0x90, 0xd2, 0x51, 0x95, 0xa6, 0x56, 0x2f, 0xc4, 0x49, 0x82, 0xf4, 0x2a, 0x08, 0xa3,
+ 0xba, 0xeb, 0x95, 0x26, 0xf9, 0x7b, 0x4a, 0x9e, 0xa4, 0x55, 0x5a, 0x88, 0x39, 0x8c, 0x29, 0x60,
+ 0xe9, 0x8f, 0x1b, 0xf4, 0xc6, 0x9d, 0x62, 0x88, 0xb1, 0x02, 0x56, 0x02, 0x70, 0x8c, 0x43, 0x99,
+ 0xe0, 0x28, 0xda, 0x2d, 0x21, 0x86, 0xaa, 0x0e, 0xc4, 0xf5, 0xf5, 0xcf, 0x60, 0x5a, 0x6e, 0xdf,
+ 0x81, 0x71, 0x75, 0x4c, 0xb0, 0x31, 0x41, 0x65, 0xe8, 0x67, 0x6c, 0x9f, 0x10, 0x9f, 0x0e, 0xd3,
+ 0x2e, 0x30, 0x96, 0x10, 0xf3, 0x72, 0xd6, 0x05, 0xf7, 0x3e, 0x99, 0xdf, 0x8d, 0x08, 0x97, 0x45,
+ 0x14, 0xb5, 0x2e, 0x48, 0x00, 0x8e, 0x71, 0xec, 0xff, 0x9f, 0xb3, 0xcf, 0xf1, 0x2d, 0xd1, 0xc3,
+ 0xbd, 0xf8, 0x3c, 0x0c, 0x31, 0xc3, 0x0f, 0x3f, 0xe0, 0xda, 0xd9, 0xfe, 0x98, 0x61, 0xbe, 0x2a,
+ 0xca, 0xb1, 0xc2, 0x40, 0xaf, 0xc3, 0x58, 0x4d, 0x6f, 0x40, 0x5c, 0xea, 0xea, 0x18, 0x31, 0x5a,
+ 0xc7, 0x26, 0x2e, 0xba, 0x0c, 0x43, 0xcc, 0xc6, 0xa9, 0xe6, 0x37, 0x04, 0xb7, 0x29, 0x39, 0x93,
+ 0xa1, 0x8a, 0x28, 0x3f, 0xd0, 0x7e, 0x63, 0x85, 0x8d, 0xce, 0xc3, 0x00, 0xed, 0xc2, 0x4a, 0x45,
+ 0x5c, 0xa7, 0x4a, 0x12, 0x78, 0x95, 0x95, 0x62, 0x01, 0xb5, 0x7f, 0xdb, 0x62, 0xbc, 0x54, 0xfa,
+ 0xcc, 0x47, 0x57, 0xd9, 0xa5, 0xc1, 0x6e, 0x10, 0x4d, 0x0b, 0xff, 0xa4, 0x76, 0x13, 0x28, 0xd8,
+ 0x41, 0xe2, 0x3f, 0x36, 0x6a, 0xa2, 0x77, 0x93, 0x37, 0x03, 0x67, 0x28, 0x5e, 0x96, 0x43, 0x90,
+ 0xbc, 0x1d, 0x1e, 0x8b, 0xaf, 0x38, 0xda, 0x9f, 0x4e, 0x57, 0x84, 0xfd, 0x4f, 0x0a, 0xda, 0x2a,
+ 0xa9, 0x46, 0x4e, 0x44, 0x50, 0x05, 0x06, 0xef, 0x3a, 0x6e, 0xe4, 0x7a, 0x9b, 0x82, 0xef, 0xeb,
+ 0x7c, 0xd1, 0xb1, 0x4a, 0xb7, 0x79, 0x05, 0xce, 0xbd, 0x88, 0x3f, 0x58, 0x92, 0xa1, 0x14, 0x83,
+ 0xb6, 0xe7, 0x51, 0x8a, 0x85, 0x5e, 0x29, 0x62, 0x5e, 0x81, 0x53, 0x14, 0x7f, 0xb0, 0x24, 0x83,
+ 0xde, 0x03, 0x90, 0x27, 0x04, 0xa9, 0x0b, 0xd9, 0xe1, 0xf3, 0xdd, 0x89, 0xae, 0xab, 0x3a, 0x5c,
+ 0x38, 0x19, 0xff, 0xc7, 0x1a, 0x3d, 0x3b, 0xd2, 0xe6, 0x54, 0xef, 0x0c, 0xfa, 0x2c, 0xdd, 0xa2,
+ 0x4e, 0x10, 0x91, 0xfa, 0x5c, 0x24, 0x06, 0xe7, 0xd9, 0xde, 0x1e, 0x87, 0xeb, 0x6e, 0x93, 0xe8,
+ 0xdb, 0x59, 0x10, 0xc1, 0x31, 0x3d, 0xfb, 0xd7, 0x8b, 0x50, 0xca, 0xeb, 0x2e, 0xdd, 0x34, 0xe4,
+ 0x9e, 0x1b, 0x2d, 0x50, 0xb6, 0xd6, 0x32, 0x37, 0xcd, 0x92, 0x28, 0xc7, 0x0a, 0x83, 0xae, 0xde,
+ 0xd0, 0xdd, 0x94, 0x6f, 0xfb, 0xfe, 0x78, 0xf5, 0x56, 0x59, 0x29, 0x16, 0x50, 0x8a, 0x17, 0x10,
+ 0x27, 0x14, 0xc6, 0x77, 0xda, 0x2a, 0xc7, 0xac, 0x14, 0x0b, 0xa8, 0x2e, 0x65, 0xec, 0xeb, 0x22,
+ 0x65, 0x34, 0x86, 0xa8, 0xff, 0xe1, 0x0e, 0x11, 0xfa, 0x3c, 0xc0, 0x86, 0xeb, 0xb9, 0xe1, 0x16,
+ 0xa3, 0x3e, 0x70, 0x68, 0xea, 0x8a, 0x29, 0x5e, 0x56, 0x54, 0xb0, 0x46, 0x11, 0xbd, 0x02, 0x23,
+ 0xea, 0x00, 0x59, 0x59, 0x64, 0xaa, 0x7f, 0xcd, 0x94, 0x2a, 0x3e, 0x4d, 0x17, 0xb1, 0x8e, 0x67,
+ 0x7f, 0x31, 0xb9, 0x5e, 0xc4, 0x0e, 0xd0, 0xc6, 0xd7, 0xea, 0x75, 0x7c, 0x0b, 0x9d, 0xc7, 0xd7,
+ 0xfe, 0xe3, 0x61, 0x98, 0x30, 0x1a, 0x6b, 0x87, 0x3d, 0x9c, 0xb9, 0x57, 0xe8, 0x05, 0xe4, 0x44,
+ 0x44, 0xec, 0x3f, 0xbb, 0xfb, 0x56, 0xd1, 0x2f, 0x29, 0xba, 0x03, 0x78, 0x7d, 0xf4, 0x79, 0x18,
+ 0x6e, 0x38, 0x21, 0x93, 0x58, 0x12, 0xb1, 0xef, 0x7a, 0x21, 0x16, 0x3f, 0x08, 0x9d, 0x30, 0xd2,
+ 0x6e, 0x7d, 0x4e, 0x3b, 0x26, 0x49, 0x6f, 0x4a, 0xca, 0x5f, 0x49, 0xeb, 0x4e, 0xd5, 0x09, 0xca,
+ 0x84, 0xed, 0x62, 0x0e, 0x43, 0x97, 0xd9, 0xd1, 0x4a, 0x57, 0xc5, 0x02, 0xe5, 0x46, 0xd9, 0x32,
+ 0xeb, 0x37, 0x98, 0x6c, 0x05, 0xc3, 0x06, 0x66, 0xfc, 0x26, 0x1b, 0xe8, 0xf0, 0x26, 0x7b, 0x06,
+ 0x06, 0xd9, 0x0f, 0xb5, 0x02, 0xd4, 0x6c, 0xac, 0xf0, 0x62, 0x2c, 0xe1, 0xc9, 0x05, 0x33, 0xd4,
+ 0xdb, 0x82, 0xa1, 0xaf, 0x3e, 0xb1, 0xa8, 0x99, 0xd9, 0xc5, 0x10, 0x3f, 0xe5, 0xc4, 0x92, 0xc7,
+ 0x12, 0x86, 0x7e, 0xde, 0x02, 0xe4, 0x34, 0xe8, 0x6b, 0x99, 0x16, 0xab, 0xc7, 0x0d, 0x30, 0x56,
+ 0xfb, 0xf5, 0xae, 0xc3, 0xde, 0x0e, 0x67, 0xe7, 0x52, 0xb5, 0xb9, 0xa4, 0xf4, 0x35, 0xd1, 0x45,
+ 0x94, 0x46, 0xd0, 0x2f, 0xa3, 0xeb, 0x6e, 0x18, 0x7d, 0xf9, 0xcf, 0x13, 0x97, 0x53, 0x46, 0x97,
+ 0xd0, 0x4d, 0xfd, 0xf1, 0x35, 0x72, 0xc8, 0xc7, 0xd7, 0x58, 0xee, 0xc3, 0xeb, 0xef, 0x25, 0x1e,
+ 0x30, 0xa3, 0xec, 0xcb, 0x9f, 0xea, 0xf2, 0x80, 0x11, 0xe2, 0xf4, 0x5e, 0x9e, 0x31, 0x15, 0xa1,
+ 0x07, 0x1e, 0x63, 0x5d, 0xee, 0xfc, 0x08, 0xbe, 0x19, 0x92, 0x60, 0xfe, 0x94, 0x54, 0x13, 0x1f,
+ 0xe8, 0xbc, 0x87, 0xa6, 0x37, 0xfe, 0x29, 0x0b, 0x4a, 0xe9, 0x01, 0xe2, 0x5d, 0x2a, 0x8d, 0xb3,
+ 0xfe, 0xdb, 0x9d, 0x46, 0x46, 0x74, 0x5e, 0x9a, 0xbb, 0x96, 0xe6, 0x72, 0x68, 0xe1, 0xdc, 0x56,
+ 0xd0, 0x65, 0x80, 0x30, 0xf2, 0x5b, 0xfc, 0xac, 0x67, 0xcc, 0xec, 0x30, 0x33, 0xb8, 0x80, 0xaa,
+ 0x2a, 0x3d, 0x88, 0xef, 0x02, 0x0d, 0x77, 0xa6, 0x0d, 0x27, 0x73, 0x56, 0x4c, 0x86, 0xbc, 0x7b,
+ 0x51, 0x97, 0x77, 0x77, 0x91, 0x92, 0xce, 0xca, 0x39, 0x9d, 0x7d, 0xa7, 0xed, 0x78, 0x91, 0x1b,
+ 0xed, 0xea, 0xf2, 0x71, 0x0f, 0xcc, 0xa1, 0x44, 0x9f, 0x83, 0xfe, 0x86, 0xeb, 0xb5, 0xef, 0x89,
+ 0x3b, 0xf6, 0x7c, 0xf6, 0xf3, 0xc7, 0x6b, 0xdf, 0x33, 0x27, 0xa7, 0x4c, 0xb7, 0x32, 0x2b, 0x3f,
+ 0xd8, 0x2b, 0xa3, 0x34, 0x02, 0xe6, 0x54, 0xed, 0x67, 0x61, 0x7c, 0xd1, 0x21, 0x4d, 0xdf, 0x5b,
+ 0xf2, 0xea, 0x2d, 0xdf, 0xf5, 0x22, 0x54, 0x82, 0x3e, 0xc6, 0x5c, 0xf2, 0xab, 0xb5, 0x8f, 0x0e,
+ 0x3e, 0x66, 0x25, 0xf6, 0x26, 0x1c, 0x5f, 0xf4, 0xef, 0x7a, 0x77, 0x9d, 0xa0, 0x3e, 0x57, 0x59,
+ 0xd1, 0xe4, 0x85, 0x6b, 0x52, 0x5e, 0x65, 0xe5, 0x4b, 0x03, 0xb4, 0x9a, 0x7c, 0x11, 0x2e, 0xbb,
+ 0x0d, 0x92, 0x23, 0xd5, 0xfd, 0xe7, 0x05, 0xa3, 0xa5, 0x18, 0x5f, 0xe9, 0x24, 0xad, 0x5c, 0x73,
+ 0x86, 0x77, 0x60, 0x68, 0xc3, 0x25, 0x8d, 0x3a, 0x26, 0x1b, 0x62, 0x36, 0x9e, 0xce, 0x37, 0x78,
+ 0x5c, 0xa6, 0x98, 0x4a, 0x79, 0xca, 0xa4, 0x5d, 0xcb, 0xa2, 0x32, 0x56, 0x64, 0xd0, 0x36, 0x4c,
+ 0xca, 0x39, 0x93, 0x50, 0x71, 0xde, 0x3f, 0xd3, 0x69, 0xf9, 0x9a, 0xc4, 0x99, 0xf1, 0x37, 0x4e,
+ 0x90, 0xc1, 0x29, 0xc2, 0xe8, 0x34, 0xf4, 0x35, 0x29, 0x67, 0xd3, 0xc7, 0x86, 0x9f, 0x89, 0xb7,
+ 0x98, 0xa4, 0x8e, 0x95, 0xda, 0xff, 0xc2, 0x82, 0x93, 0xa9, 0x91, 0x11, 0x12, 0xcb, 0x87, 0x3c,
+ 0x0b, 0x49, 0x09, 0x62, 0xa1, 0xbb, 0x04, 0xd1, 0xfe, 0xcf, 0x2d, 0x38, 0xb6, 0xd4, 0x6c, 0x45,
+ 0xbb, 0x8b, 0xae, 0x69, 0x7b, 0xf0, 0x2a, 0x0c, 0x34, 0x49, 0xdd, 0x6d, 0x37, 0xc5, 0xcc, 0x95,
+ 0xe5, 0xed, 0xbf, 0xca, 0x4a, 0xe9, 0x09, 0x52, 0x8d, 0xfc, 0xc0, 0xd9, 0x24, 0xbc, 0x00, 0x0b,
+ 0x74, 0xc6, 0x43, 0xb9, 0xf7, 0xc9, 0x75, 0xb7, 0xe9, 0x46, 0x0f, 0xb6, 0xbb, 0x84, 0xd9, 0x80,
+ 0x24, 0x82, 0x63, 0x7a, 0xf6, 0x77, 0x2d, 0x98, 0x90, 0xeb, 0x7e, 0xae, 0x5e, 0x0f, 0x48, 0x18,
+ 0xa2, 0x19, 0x28, 0xb8, 0x2d, 0xd1, 0x4b, 0x10, 0xbd, 0x2c, 0xac, 0x54, 0x70, 0xc1, 0x6d, 0xc9,
+ 0xe7, 0x1a, 0x63, 0x30, 0x8a, 0xa6, 0x05, 0xc5, 0x55, 0x51, 0x8e, 0x15, 0x06, 0xba, 0x00, 0x43,
+ 0x9e, 0x5f, 0xe7, 0x2f, 0x1e, 0xa1, 0x43, 0xa7, 0x98, 0x6b, 0xa2, 0x0c, 0x2b, 0x28, 0xaa, 0xc0,
+ 0x30, 0xb7, 0xaf, 0x8d, 0x17, 0x6d, 0x4f, 0x56, 0xba, 0xec, 0xcb, 0xd6, 0x65, 0x4d, 0x1c, 0x13,
+ 0xb1, 0x7f, 0xd7, 0x82, 0x51, 0xf9, 0x65, 0x3d, 0xbe, 0x45, 0xe9, 0xd6, 0x8a, 0xdf, 0xa1, 0xf1,
+ 0xd6, 0xa2, 0x6f, 0x49, 0x06, 0x31, 0x9e, 0x90, 0xc5, 0x43, 0x3d, 0x21, 0x2f, 0xc1, 0x88, 0xd3,
+ 0x6a, 0x55, 0xcc, 0xf7, 0x27, 0x5b, 0x4a, 0x73, 0x71, 0x31, 0xd6, 0x71, 0xec, 0x9f, 0x2b, 0xc0,
+ 0xb8, 0xfc, 0x82, 0x6a, 0xfb, 0x4e, 0x48, 0x22, 0xb4, 0x0e, 0xc3, 0x0e, 0x9f, 0x25, 0x22, 0x17,
+ 0xf9, 0x13, 0xd9, 0x72, 0x51, 0x63, 0x4a, 0x63, 0x46, 0x7a, 0x4e, 0xd6, 0xc6, 0x31, 0x21, 0xd4,
+ 0x80, 0x29, 0xcf, 0x8f, 0x18, 0x53, 0xa5, 0xe0, 0x9d, 0x54, 0xd5, 0x49, 0xea, 0xa7, 0x04, 0xf5,
+ 0xa9, 0xb5, 0x24, 0x15, 0x9c, 0x26, 0x8c, 0x96, 0xa4, 0xac, 0xb9, 0x98, 0x2f, 0x24, 0xd4, 0x27,
+ 0x2e, 0x5b, 0xd4, 0x6c, 0xff, 0x96, 0x05, 0xc3, 0x12, 0xed, 0x28, 0xac, 0x12, 0x56, 0x61, 0x30,
+ 0x64, 0x93, 0x20, 0x87, 0xc6, 0xee, 0xd4, 0x71, 0x3e, 0x5f, 0x31, 0xaf, 0xc8, 0xff, 0x87, 0x58,
+ 0xd2, 0x60, 0xaa, 0x46, 0xd5, 0xfd, 0x0f, 0x89, 0xaa, 0x51, 0xf5, 0x27, 0xe7, 0x52, 0xfa, 0x4b,
+ 0xd6, 0x67, 0x4d, 0x76, 0x4f, 0x9f, 0x34, 0xad, 0x80, 0x6c, 0xb8, 0xf7, 0x92, 0x4f, 0x9a, 0x0a,
+ 0x2b, 0xc5, 0x02, 0x8a, 0xde, 0x83, 0xd1, 0x9a, 0xd4, 0x31, 0xc5, 0x3b, 0xfc, 0x7c, 0x47, 0x7d,
+ 0xa7, 0x52, 0x8d, 0x73, 0x19, 0xe9, 0x82, 0x56, 0x1f, 0x1b, 0xd4, 0x4c, 0xfb, 0xb1, 0x62, 0x37,
+ 0xfb, 0xb1, 0x98, 0x6e, 0xbe, 0x35, 0xd5, 0xbf, 0xb4, 0x60, 0x80, 0xeb, 0x16, 0x7a, 0x53, 0xed,
+ 0x68, 0x96, 0x02, 0xf1, 0xd8, 0xdd, 0xa2, 0x85, 0x82, 0xb3, 0x41, 0xab, 0x30, 0xcc, 0x7e, 0x30,
+ 0xdd, 0x48, 0x31, 0xdf, 0xdb, 0x8c, 0xb7, 0xaa, 0x77, 0xf0, 0x96, 0xac, 0x86, 0x63, 0x0a, 0xf6,
+ 0xf7, 0x8b, 0xf4, 0x74, 0x8b, 0x51, 0x8d, 0x4b, 0xdf, 0x7a, 0x74, 0x97, 0x7e, 0xe1, 0x51, 0x5d,
+ 0xfa, 0x9b, 0x30, 0x51, 0xd3, 0xec, 0x0a, 0xe2, 0x99, 0xbc, 0xd0, 0x71, 0x91, 0x68, 0x26, 0x08,
+ 0x5c, 0xfa, 0xba, 0x60, 0x12, 0xc1, 0x49, 0xaa, 0xe8, 0xb3, 0x30, 0xca, 0xe7, 0x59, 0xb4, 0xc2,
+ 0x4d, 0xf0, 0x9e, 0xca, 0x5f, 0x2f, 0x7a, 0x13, 0x5c, 0x5a, 0xaf, 0x55, 0xc7, 0x06, 0x31, 0x54,
+ 0x05, 0xd8, 0x70, 0x1b, 0x44, 0x90, 0xee, 0x60, 0x2d, 0xbb, 0xcc, 0xb1, 0x14, 0xe1, 0x71, 0x2e,
+ 0x87, 0x90, 0x55, 0xb1, 0x46, 0xc6, 0xfe, 0x1b, 0x0b, 0xd0, 0x52, 0x6b, 0x8b, 0x34, 0x49, 0xe0,
+ 0x34, 0x62, 0x9d, 0xe3, 0xd7, 0x2c, 0x28, 0x91, 0x54, 0xf1, 0x82, 0xdf, 0x6c, 0x0a, 0x09, 0x43,
+ 0x8e, 0x10, 0x6c, 0x29, 0xa7, 0x4e, 0xfc, 0xca, 0xc8, 0xc3, 0xc0, 0xb9, 0xed, 0xa1, 0x55, 0x98,
+ 0xe6, 0x57, 0xaf, 0x02, 0x68, 0xa6, 0x7f, 0x8f, 0x09, 0xc2, 0xd3, 0xeb, 0x69, 0x14, 0x9c, 0x55,
+ 0xcf, 0xfe, 0xad, 0x31, 0xc8, 0xed, 0xc5, 0x47, 0xca, 0xd6, 0x8f, 0x94, 0xad, 0x1f, 0x29, 0x5b,
+ 0x3f, 0x52, 0xb6, 0x7e, 0xa4, 0x6c, 0xfd, 0x48, 0xd9, 0xfa, 0x21, 0x55, 0xb6, 0xfe, 0x53, 0x0b,
+ 0x8e, 0xab, 0xeb, 0xcb, 0x90, 0x02, 0x7c, 0x09, 0xa6, 0xf9, 0x76, 0x5b, 0x68, 0x38, 0x6e, 0x73,
+ 0x9d, 0x34, 0x5b, 0x0d, 0x27, 0x92, 0x26, 0x55, 0x97, 0x32, 0x57, 0x6e, 0xc2, 0x6f, 0xc3, 0xa8,
+ 0xc8, 0x1d, 0xe0, 0x32, 0x00, 0x38, 0xab, 0x19, 0xfb, 0xd7, 0x87, 0xa0, 0x7f, 0x69, 0x87, 0x78,
+ 0xd1, 0x11, 0xbc, 0x97, 0x6a, 0x30, 0xee, 0x7a, 0x3b, 0x7e, 0x63, 0x87, 0xd4, 0x39, 0xfc, 0x30,
+ 0xcf, 0xfa, 0x13, 0x82, 0xf4, 0xf8, 0x8a, 0x41, 0x02, 0x27, 0x48, 0x3e, 0x0a, 0x95, 0xd5, 0x15,
+ 0x18, 0xe0, 0x97, 0x8f, 0x60, 0xdf, 0x32, 0xcf, 0x6c, 0x36, 0x88, 0xe2, 0x4a, 0x8d, 0xd5, 0x69,
+ 0xfc, 0x72, 0x13, 0xd5, 0xd1, 0x17, 0x61, 0x7c, 0xc3, 0x0d, 0xc2, 0x68, 0xdd, 0x6d, 0xd2, 0xab,
+ 0xa1, 0xd9, 0x7a, 0x00, 0x15, 0x95, 0x1a, 0x87, 0x65, 0x83, 0x12, 0x4e, 0x50, 0x46, 0x9b, 0x30,
+ 0xd6, 0x70, 0xf4, 0xa6, 0x06, 0x0f, 0xdd, 0x94, 0xba, 0x1d, 0xae, 0xeb, 0x84, 0xb0, 0x49, 0x97,
+ 0x6e, 0xa7, 0x1a, 0xd3, 0xb2, 0x0c, 0x31, 0x19, 0x89, 0xda, 0x4e, 0x5c, 0xbd, 0xc2, 0x61, 0x94,
+ 0x41, 0x63, 0xde, 0x0f, 0xc3, 0x26, 0x83, 0xa6, 0xf9, 0x38, 0x7c, 0x01, 0x86, 0x09, 0x1d, 0x42,
+ 0x4a, 0x58, 0x5c, 0x30, 0x17, 0x7b, 0xeb, 0xeb, 0xaa, 0x5b, 0x0b, 0x7c, 0x53, 0x39, 0xb8, 0x24,
+ 0x29, 0xe1, 0x98, 0x28, 0x5a, 0x80, 0x81, 0x90, 0x04, 0xae, 0x52, 0x40, 0x74, 0x98, 0x46, 0x86,
+ 0xc6, 0x3d, 0x2c, 0xf9, 0x6f, 0x2c, 0xaa, 0xd2, 0xe5, 0xe5, 0x30, 0xf9, 0x2e, 0xbb, 0x0c, 0xb4,
+ 0xe5, 0x35, 0xc7, 0x4a, 0xb1, 0x80, 0xa2, 0xb7, 0x61, 0x30, 0x20, 0x0d, 0xa6, 0x7d, 0x1e, 0xeb,
+ 0x7d, 0x91, 0x73, 0x65, 0x36, 0xaf, 0x87, 0x25, 0x01, 0x74, 0x0d, 0x50, 0x40, 0x28, 0x83, 0xe7,
+ 0x7a, 0x9b, 0xca, 0x27, 0x40, 0x1c, 0xb4, 0x8a, 0x91, 0xc6, 0x31, 0x86, 0x74, 0xae, 0xc5, 0x19,
+ 0xd5, 0xd0, 0x15, 0x98, 0x52, 0xa5, 0x2b, 0x5e, 0x18, 0x39, 0xf4, 0x80, 0xe3, 0x3a, 0x00, 0x25,
+ 0xb4, 0xc1, 0x49, 0x04, 0x9c, 0xae, 0x63, 0xff, 0xa2, 0x05, 0x7c, 0x9c, 0x8f, 0x40, 0x54, 0xf1,
+ 0xa6, 0x29, 0xaa, 0x38, 0x95, 0x3b, 0x73, 0x39, 0x62, 0x8a, 0x5f, 0xb4, 0x60, 0x44, 0x9b, 0xd9,
+ 0x78, 0xcd, 0x5a, 0x1d, 0xd6, 0x6c, 0x1b, 0x26, 0xe9, 0x4a, 0xbf, 0x71, 0x27, 0x24, 0xc1, 0x0e,
+ 0xa9, 0xb3, 0x85, 0x59, 0x78, 0xb0, 0x85, 0xa9, 0xec, 0x8f, 0xaf, 0x27, 0x08, 0xe2, 0x54, 0x13,
+ 0xf6, 0x17, 0x64, 0x57, 0x95, 0xb9, 0x76, 0x4d, 0xcd, 0x79, 0xc2, 0x5c, 0x5b, 0xcd, 0x2a, 0x8e,
+ 0x71, 0xe8, 0x56, 0xdb, 0xf2, 0xc3, 0x28, 0x69, 0xae, 0x7d, 0xd5, 0x0f, 0x23, 0xcc, 0x20, 0xf6,
+ 0x4b, 0x00, 0x4b, 0xf7, 0x48, 0x8d, 0xaf, 0x58, 0xfd, 0xd1, 0x63, 0xe5, 0x3f, 0x7a, 0xec, 0x3f,
+ 0xb1, 0x60, 0x7c, 0x79, 0xc1, 0xb8, 0xb9, 0x66, 0x01, 0xf8, 0x4b, 0xed, 0xf6, 0xed, 0x35, 0x69,
+ 0x33, 0xc4, 0xcd, 0x26, 0x54, 0x29, 0xd6, 0x30, 0xd0, 0x29, 0x28, 0x36, 0xda, 0x9e, 0x90, 0xa5,
+ 0x0e, 0xd2, 0xeb, 0xf1, 0x7a, 0xdb, 0xc3, 0xb4, 0x4c, 0x73, 0xac, 0x2b, 0xf6, 0xec, 0x58, 0xd7,
+ 0x35, 0xbe, 0x0f, 0x2a, 0x43, 0xff, 0xdd, 0xbb, 0x6e, 0x9d, 0x87, 0x2d, 0x10, 0xf6, 0x4c, 0xb7,
+ 0x6f, 0xaf, 0x2c, 0x86, 0x98, 0x97, 0xdb, 0xbf, 0x6c, 0xc1, 0x44, 0xe2, 0xdd, 0x4d, 0xdf, 0x6f,
+ 0x3b, 0x2a, 0x68, 0x4c, 0x32, 0x36, 0x86, 0x16, 0x4e, 0x46, 0xc3, 0xea, 0xc1, 0xa1, 0x54, 0x38,
+ 0x24, 0x14, 0x7b, 0x70, 0x48, 0xe8, 0x6c, 0x65, 0xfc, 0xf5, 0x22, 0xcc, 0x2c, 0x37, 0xc8, 0xbd,
+ 0x0f, 0x18, 0x6d, 0xa2, 0x57, 0x4f, 0xc6, 0xc3, 0x09, 0xd2, 0x0e, 0xeb, 0xad, 0xda, 0x7d, 0x0a,
+ 0x37, 0x60, 0x90, 0x7f, 0xba, 0x8c, 0x3d, 0x91, 0xa9, 0xd6, 0xce, 0x1f, 0x90, 0x59, 0x3e, 0x84,
+ 0x42, 0xad, 0xad, 0xee, 0x78, 0x51, 0x8a, 0x25, 0xf1, 0x99, 0xd7, 0x60, 0x54, 0xc7, 0x3c, 0x94,
+ 0xdf, 0xf8, 0x3f, 0x28, 0xc2, 0x24, 0xed, 0xc1, 0x23, 0x9d, 0x88, 0x9b, 0xe9, 0x89, 0x78, 0xd8,
+ 0xbe, 0xc3, 0xdd, 0x67, 0xe3, 0xbd, 0xe4, 0x6c, 0x5c, 0xca, 0x9b, 0x8d, 0xa3, 0x9e, 0x83, 0x7f,
+ 0x68, 0xc1, 0xf4, 0x72, 0xc3, 0xaf, 0x6d, 0x27, 0xfc, 0x7b, 0x5f, 0x81, 0x11, 0x7a, 0x83, 0x84,
+ 0x46, 0xa8, 0x1b, 0x23, 0xf8, 0x91, 0x00, 0x61, 0x1d, 0x4f, 0xab, 0x76, 0xf3, 0xe6, 0xca, 0x62,
+ 0x56, 0xcc, 0x24, 0x01, 0xc2, 0x3a, 0x9e, 0xfd, 0x87, 0x16, 0x9c, 0xb9, 0xb2, 0xb0, 0x14, 0x2f,
+ 0xc5, 0x54, 0xd8, 0xa6, 0xf3, 0x30, 0xd0, 0xaa, 0x6b, 0x5d, 0x89, 0xc5, 0xe3, 0x8b, 0xac, 0x17,
+ 0x02, 0xfa, 0x61, 0x89, 0x90, 0x76, 0x13, 0xe0, 0x0a, 0xae, 0x2c, 0x88, 0xab, 0x42, 0x6a, 0xc3,
+ 0xac, 0x5c, 0x6d, 0xd8, 0x53, 0x30, 0x48, 0xaf, 0x32, 0xb7, 0x26, 0xfb, 0xcd, 0x0d, 0x57, 0x78,
+ 0x11, 0x96, 0x30, 0xfb, 0x17, 0x2c, 0x98, 0xbe, 0xe2, 0x46, 0x94, 0xcf, 0x48, 0xc6, 0x25, 0xa2,
+ 0x8c, 0x46, 0xe8, 0x46, 0x7e, 0xb0, 0x9b, 0x3c, 0x7b, 0xb1, 0x82, 0x60, 0x0d, 0x8b, 0x7f, 0xd0,
+ 0x8e, 0xcb, 0x3c, 0x86, 0x0a, 0xa6, 0xfe, 0x11, 0x8b, 0x72, 0xac, 0x30, 0xe8, 0x78, 0xd5, 0xdd,
+ 0x80, 0x9d, 0xf4, 0xf2, 0x34, 0x56, 0xe3, 0xb5, 0x28, 0x01, 0x38, 0xc6, 0xb1, 0xff, 0xca, 0x82,
+ 0xf2, 0x15, 0xee, 0xf7, 0xbc, 0x11, 0xe6, 0x1c, 0xba, 0x2f, 0xc1, 0x30, 0x91, 0x8a, 0x12, 0xd1,
+ 0x6b, 0xc5, 0x3b, 0x2b, 0x0d, 0x0a, 0x0f, 0x8f, 0xa4, 0xf0, 0x7a, 0xb8, 0x33, 0x0e, 0xe7, 0x45,
+ 0xbe, 0x0c, 0x88, 0xe8, 0x6d, 0xe9, 0xf1, 0xa2, 0x58, 0xe0, 0x99, 0xa5, 0x14, 0x14, 0x67, 0xd4,
+ 0xb0, 0xff, 0x85, 0x05, 0xc7, 0xd5, 0x07, 0x7f, 0xe8, 0x3e, 0xd3, 0xfe, 0x95, 0x02, 0x8c, 0x5d,
+ 0x5d, 0x5f, 0xaf, 0x5c, 0x21, 0x91, 0xb6, 0x2a, 0x3b, 0x9b, 0x3f, 0x60, 0x4d, 0x8b, 0xdb, 0xe9,
+ 0x59, 0xdb, 0x8e, 0xdc, 0xc6, 0x2c, 0x8f, 0x82, 0x38, 0xbb, 0xe2, 0x45, 0x37, 0x82, 0x6a, 0x14,
+ 0xb8, 0xde, 0x66, 0xe6, 0x4a, 0x97, 0x6c, 0x56, 0x31, 0x8f, 0xcd, 0x42, 0x2f, 0xc1, 0x00, 0x0b,
+ 0xc3, 0x28, 0x27, 0xe1, 0x31, 0xf5, 0x2a, 0x64, 0xa5, 0x07, 0x7b, 0xe5, 0xe1, 0x9b, 0x78, 0x85,
+ 0xff, 0xc1, 0x02, 0x15, 0xdd, 0x84, 0x91, 0xad, 0x28, 0x6a, 0x5d, 0x25, 0x4e, 0x9d, 0x04, 0xf2,
+ 0x94, 0x3d, 0x9b, 0x75, 0xca, 0xd2, 0x41, 0xe0, 0x68, 0xf1, 0xc1, 0x14, 0x97, 0x85, 0x58, 0xa7,
+ 0x63, 0x57, 0x01, 0x62, 0xd8, 0x43, 0x52, 0x60, 0xd9, 0xeb, 0x30, 0x4c, 0x3f, 0x77, 0xae, 0xe1,
+ 0x3a, 0x9d, 0x4d, 0x04, 0x9e, 0x83, 0x61, 0x69, 0x00, 0x10, 0x8a, 0x20, 0x29, 0xec, 0x46, 0x92,
+ 0xf6, 0x01, 0x21, 0x8e, 0xe1, 0xf6, 0x93, 0x20, 0x6c, 0xa8, 0x3b, 0x91, 0xb4, 0x37, 0xe0, 0x18,
+ 0x33, 0x06, 0x77, 0xa2, 0x2d, 0x63, 0x8d, 0x76, 0x5f, 0x0c, 0xcf, 0x8b, 0xa7, 0x68, 0x41, 0xd9,
+ 0x3d, 0x49, 0x27, 0xfc, 0x51, 0x49, 0x31, 0x7e, 0x96, 0xda, 0xdf, 0xef, 0x83, 0xc7, 0x56, 0xaa,
+ 0xf9, 0xd1, 0xbd, 0x2e, 0xc3, 0x28, 0xe7, 0x70, 0xe9, 0xd2, 0x70, 0x1a, 0xa2, 0x5d, 0x25, 0xb4,
+ 0x5d, 0xd7, 0x60, 0xd8, 0xc0, 0xa4, 0x1c, 0xa1, 0xfb, 0xbe, 0x97, 0x74, 0x51, 0x5d, 0x79, 0x67,
+ 0x0d, 0xd3, 0x72, 0x0a, 0xa6, 0xcc, 0x32, 0x3f, 0xd2, 0x15, 0x58, 0x31, 0xcc, 0x6f, 0xc2, 0xb8,
+ 0x1b, 0xd6, 0x42, 0x77, 0xc5, 0xa3, 0xfb, 0x54, 0xdb, 0xe9, 0x4a, 0x4c, 0x42, 0x3b, 0xad, 0xa0,
+ 0x38, 0x81, 0xad, 0xdd, 0x2f, 0xfd, 0x3d, 0x33, 0xdc, 0x5d, 0x63, 0x8b, 0xd0, 0xe3, 0xbf, 0xc5,
+ 0xbe, 0x2e, 0x64, 0x5a, 0x03, 0x71, 0xfc, 0xf3, 0x0f, 0x0e, 0xb1, 0x84, 0xd1, 0x37, 0x68, 0x6d,
+ 0xcb, 0x69, 0xcd, 0xb5, 0xa3, 0xad, 0x45, 0x37, 0xac, 0xf9, 0x3b, 0x24, 0xd8, 0x65, 0xe2, 0x83,
+ 0xa1, 0xf8, 0x0d, 0xaa, 0x00, 0x0b, 0x57, 0xe7, 0x2a, 0x14, 0x13, 0xa7, 0xeb, 0xa0, 0x39, 0x98,
+ 0x90, 0x85, 0x55, 0x12, 0xb2, 0x2b, 0x60, 0x84, 0x91, 0x51, 0x4e, 0xa3, 0xa2, 0x58, 0x11, 0x49,
+ 0xe2, 0x9b, 0x0c, 0x2e, 0x3c, 0x0c, 0x06, 0xf7, 0x55, 0x18, 0x73, 0x3d, 0x37, 0x72, 0x9d, 0xc8,
+ 0xe7, 0x2a, 0x2f, 0x2e, 0x29, 0x60, 0x32, 0xf1, 0x15, 0x1d, 0x80, 0x4d, 0x3c, 0xfb, 0xff, 0xe8,
+ 0x83, 0x29, 0x36, 0x6d, 0x1f, 0xad, 0xb0, 0x1f, 0xa7, 0x15, 0x76, 0x33, 0xbd, 0xc2, 0x1e, 0x06,
+ 0xe7, 0xfe, 0xc0, 0xcb, 0xec, 0x2b, 0x16, 0x4c, 0x31, 0xb1, 0xbc, 0xb1, 0xcc, 0x2e, 0xc2, 0x70,
+ 0x60, 0xf8, 0xf3, 0x0e, 0xeb, 0x7a, 0x38, 0xe9, 0x9a, 0x1b, 0xe3, 0xa0, 0xb7, 0x00, 0x5a, 0xb1,
+ 0xd8, 0xbf, 0x60, 0x04, 0x61, 0x85, 0x5c, 0x89, 0xbf, 0x56, 0xc7, 0xfe, 0x22, 0x0c, 0x2b, 0x87,
+ 0x5d, 0xf9, 0x40, 0xb6, 0x72, 0x1e, 0xc8, 0xdd, 0xd9, 0x08, 0x69, 0x23, 0x58, 0xcc, 0xb4, 0x11,
+ 0xfc, 0x0b, 0x0b, 0x62, 0xa5, 0x0c, 0x7a, 0x07, 0x86, 0x5b, 0x3e, 0x33, 0x29, 0x0f, 0xa4, 0x9f,
+ 0xc6, 0x93, 0x1d, 0xb5, 0x3a, 0x3c, 0xd2, 0x62, 0xc0, 0xa7, 0xa3, 0x22, 0xab, 0xe2, 0x98, 0x0a,
+ 0xba, 0x06, 0x83, 0xad, 0x80, 0x54, 0x23, 0x16, 0x06, 0xac, 0x77, 0x82, 0x7c, 0xf9, 0xf2, 0x8a,
+ 0x58, 0x52, 0x48, 0x58, 0xe8, 0x16, 0x7b, 0xb7, 0xd0, 0xb5, 0xff, 0x5d, 0x01, 0x26, 0x93, 0x8d,
+ 0xa0, 0x37, 0xa0, 0x8f, 0xdc, 0x23, 0x35, 0xf1, 0xa5, 0x99, 0xdc, 0x44, 0x2c, 0x10, 0xe2, 0x43,
+ 0x47, 0xff, 0x63, 0x56, 0x0b, 0x5d, 0x85, 0x41, 0xca, 0x4a, 0x5c, 0x51, 0xc1, 0x32, 0x1f, 0xcf,
+ 0x63, 0x47, 0x14, 0x4f, 0xc6, 0x3f, 0x4b, 0x14, 0x61, 0x59, 0x9d, 0x99, 0xf4, 0xd5, 0x5a, 0x55,
+ 0xfa, 0x4a, 0x8b, 0x3a, 0x09, 0x13, 0xd6, 0x17, 0x2a, 0x1c, 0x49, 0x50, 0xe3, 0x26, 0x7d, 0xb2,
+ 0x10, 0xc7, 0x44, 0xd0, 0x5b, 0xd0, 0x1f, 0x36, 0x08, 0x69, 0x09, 0x9b, 0x8d, 0x4c, 0x91, 0x6e,
+ 0x95, 0x22, 0x08, 0x4a, 0x4c, 0x04, 0xc4, 0x0a, 0x30, 0xaf, 0x68, 0xff, 0xaa, 0x05, 0xc0, 0x6d,
+ 0x20, 0x1d, 0x6f, 0x93, 0x1c, 0x81, 0x16, 0x64, 0x11, 0xfa, 0xc2, 0x16, 0xa9, 0x75, 0xf2, 0xb4,
+ 0x88, 0xfb, 0x53, 0x6d, 0x91, 0x5a, 0xbc, 0xda, 0xe9, 0x3f, 0xcc, 0x6a, 0xdb, 0x3f, 0x0d, 0x30,
+ 0x1e, 0xa3, 0xad, 0x44, 0xa4, 0x89, 0x5e, 0x30, 0x22, 0x0c, 0x9d, 0x4a, 0x44, 0x18, 0x1a, 0x66,
+ 0xd8, 0x9a, 0xc0, 0xfd, 0x8b, 0x50, 0x6c, 0x3a, 0xf7, 0x84, 0x44, 0xf5, 0xb9, 0xce, 0xdd, 0xa0,
+ 0xf4, 0x67, 0x57, 0x9d, 0x7b, 0xfc, 0x05, 0xff, 0x9c, 0xdc, 0x9d, 0xab, 0xce, 0xbd, 0xae, 0xde,
+ 0x00, 0xb4, 0x11, 0xd6, 0x96, 0xeb, 0x09, 0xf3, 0xbe, 0x9e, 0xda, 0x72, 0xbd, 0x64, 0x5b, 0xae,
+ 0xd7, 0x43, 0x5b, 0xae, 0x87, 0xee, 0xc3, 0xa0, 0xb0, 0xbe, 0x15, 0xa1, 0x0f, 0x2f, 0xf6, 0xd0,
+ 0x9e, 0x30, 0xde, 0xe5, 0x6d, 0x5e, 0x94, 0x12, 0x0a, 0x51, 0xda, 0xb5, 0x5d, 0xd9, 0x20, 0xfa,
+ 0x67, 0x16, 0x8c, 0x8b, 0xdf, 0x98, 0xbc, 0xdf, 0x26, 0x61, 0x24, 0x38, 0xf8, 0x4f, 0xf4, 0xde,
+ 0x07, 0x51, 0x91, 0x77, 0xe5, 0x13, 0xf2, 0xb2, 0x35, 0x81, 0x5d, 0x7b, 0x94, 0xe8, 0x05, 0xfa,
+ 0x77, 0x16, 0x1c, 0x6b, 0x3a, 0xf7, 0x78, 0x8b, 0xbc, 0x0c, 0x3b, 0x91, 0xeb, 0x0b, 0x83, 0x93,
+ 0x37, 0x7a, 0x9b, 0xfe, 0x54, 0x75, 0xde, 0x49, 0xa9, 0x5d, 0x3e, 0x96, 0x85, 0xd2, 0xb5, 0xab,
+ 0x99, 0xfd, 0x9a, 0xd9, 0x80, 0x21, 0xb9, 0xde, 0x1e, 0xa5, 0x6b, 0x01, 0x6b, 0x47, 0xac, 0xb5,
+ 0x47, 0xda, 0xce, 0x17, 0x61, 0x54, 0x5f, 0x63, 0x8f, 0xb4, 0xad, 0xf7, 0x61, 0x3a, 0x63, 0x2d,
+ 0x3d, 0xd2, 0x26, 0xef, 0xc2, 0xa9, 0xdc, 0xf5, 0xf1, 0x48, 0x5d, 0x43, 0x7e, 0xc5, 0xd2, 0xcf,
+ 0xc1, 0x23, 0x50, 0x45, 0x2d, 0x98, 0xaa, 0xa8, 0xb3, 0x9d, 0x77, 0x4e, 0x8e, 0x3e, 0xea, 0x3d,
+ 0xbd, 0xd3, 0xf4, 0x54, 0x47, 0x6f, 0xc3, 0x40, 0x83, 0x96, 0x48, 0x1b, 0x6e, 0xbb, 0xfb, 0x8e,
+ 0x8c, 0x39, 0x6a, 0x56, 0x1e, 0x62, 0x41, 0xc1, 0xfe, 0x86, 0x05, 0x19, 0xce, 0x2d, 0x94, 0xc3,
+ 0x6a, 0xbb, 0x75, 0x36, 0x24, 0xc5, 0x98, 0xc3, 0x52, 0x01, 0x78, 0xce, 0x40, 0x71, 0xd3, 0xad,
+ 0x0b, 0xaf, 0x6e, 0x05, 0xbe, 0x42, 0xc1, 0x9b, 0x6e, 0x1d, 0x2d, 0x03, 0x0a, 0xdb, 0xad, 0x56,
+ 0x83, 0xd9, 0x68, 0x39, 0x8d, 0x2b, 0x81, 0xdf, 0x6e, 0x71, 0x83, 0xed, 0x22, 0x17, 0x2f, 0x55,
+ 0x53, 0x50, 0x9c, 0x51, 0xc3, 0xfe, 0x0d, 0x0b, 0xfa, 0x8e, 0x60, 0x9a, 0xb0, 0x39, 0x4d, 0x2f,
+ 0xe4, 0x92, 0x16, 0x19, 0x33, 0x66, 0xb1, 0x73, 0x77, 0xe9, 0x5e, 0x44, 0xbc, 0x90, 0x31, 0x1c,
+ 0x99, 0xb3, 0xb6, 0x67, 0xc1, 0xf4, 0x75, 0xdf, 0xa9, 0xcf, 0x3b, 0x0d, 0xc7, 0xab, 0x91, 0x60,
+ 0xc5, 0xdb, 0x3c, 0x94, 0x77, 0x44, 0xa1, 0xab, 0x77, 0xc4, 0x65, 0x18, 0x70, 0x5b, 0x5a, 0xc8,
+ 0xfd, 0x73, 0x74, 0x76, 0x57, 0x2a, 0x22, 0xda, 0x3e, 0x32, 0x1a, 0x67, 0xa5, 0x58, 0xe0, 0xd3,
+ 0x65, 0xc9, 0x2d, 0x08, 0xfb, 0xf2, 0x97, 0x25, 0x7d, 0x25, 0x25, 0x43, 0xc9, 0x19, 0x06, 0xf4,
+ 0x5b, 0x60, 0x34, 0x21, 0xdc, 0xc5, 0x30, 0x0c, 0xba, 0xfc, 0x4b, 0xc5, 0xda, 0x7c, 0x3a, 0xfb,
+ 0xf5, 0x92, 0x1a, 0x18, 0xcd, 0x2f, 0x92, 0x17, 0x60, 0x49, 0xc8, 0xbe, 0x0c, 0x99, 0xa1, 0x7f,
+ 0xba, 0x4b, 0xa6, 0xec, 0xcf, 0xc0, 0x14, 0xab, 0x79, 0x48, 0xa9, 0x8f, 0x9d, 0x90, 0xa7, 0x67,
+ 0x44, 0x4f, 0xb6, 0xff, 0x17, 0x0b, 0xd0, 0xaa, 0x5f, 0x77, 0x37, 0x76, 0x05, 0x71, 0xfe, 0xfd,
+ 0xef, 0x43, 0x99, 0x3f, 0xab, 0x93, 0x11, 0x86, 0x17, 0x1a, 0x4e, 0x18, 0x6a, 0xb2, 0xfc, 0xa7,
+ 0x45, 0xbb, 0xe5, 0xf5, 0xce, 0xe8, 0xb8, 0x1b, 0x3d, 0xf4, 0x4e, 0x22, 0xe0, 0xe3, 0x27, 0x53,
+ 0x01, 0x1f, 0x9f, 0xce, 0x34, 0x02, 0x4a, 0xf7, 0x5e, 0x06, 0x82, 0xb4, 0xbf, 0x6a, 0xc1, 0xc4,
+ 0x5a, 0x22, 0x62, 0xee, 0x79, 0x66, 0x11, 0x91, 0xa1, 0xa3, 0xaa, 0xb2, 0x52, 0x2c, 0xa0, 0x0f,
+ 0x5d, 0x86, 0xfb, 0x03, 0x0b, 0xe2, 0x50, 0x63, 0x47, 0xc0, 0x72, 0x2f, 0x18, 0x2c, 0x77, 0xe6,
+ 0xf3, 0x45, 0x75, 0x27, 0x8f, 0xe3, 0x46, 0xd7, 0xd4, 0x9c, 0x74, 0x78, 0xb9, 0xc4, 0x64, 0xf8,
+ 0x3e, 0x1b, 0x37, 0x27, 0x4e, 0xcd, 0xc6, 0x77, 0x0a, 0x80, 0x14, 0x6e, 0xcf, 0x41, 0x42, 0xd3,
+ 0x35, 0x1e, 0x4e, 0x90, 0xd0, 0x1d, 0x40, 0xcc, 0xa6, 0x27, 0x70, 0xbc, 0x90, 0x93, 0x75, 0x85,
+ 0xd4, 0xfa, 0x70, 0x06, 0x43, 0x33, 0xd2, 0x6b, 0xf8, 0x7a, 0x8a, 0x1a, 0xce, 0x68, 0x41, 0xb3,
+ 0xd5, 0xea, 0xef, 0xd5, 0x56, 0x6b, 0xa0, 0x8b, 0xfb, 0xfb, 0xb7, 0x2c, 0x18, 0x53, 0xc3, 0xf4,
+ 0x21, 0x71, 0xa2, 0x51, 0xfd, 0xc9, 0xb9, 0x57, 0x2a, 0x5a, 0x97, 0x19, 0x33, 0xf0, 0x13, 0x2c,
+ 0x8c, 0x81, 0xd3, 0x70, 0xef, 0x13, 0x15, 0xcb, 0xba, 0x2c, 0xc2, 0x12, 0x88, 0xd2, 0x83, 0xbd,
+ 0xf2, 0x98, 0xfa, 0xc7, 0xed, 0x11, 0xe2, 0x2a, 0xf6, 0xbf, 0xa6, 0x9b, 0xdd, 0x5c, 0x8a, 0xe8,
+ 0x15, 0xe8, 0x6f, 0x6d, 0x39, 0x21, 0x49, 0x38, 0x1b, 0xf6, 0x57, 0x68, 0xe1, 0xc1, 0x5e, 0x79,
+ 0x5c, 0x55, 0x60, 0x25, 0x98, 0x63, 0xf7, 0x1e, 0x7a, 0x35, 0xbd, 0x38, 0xbb, 0x86, 0x5e, 0xfd,
+ 0x1b, 0x0b, 0xfa, 0xd6, 0xe8, 0xed, 0xf5, 0xe8, 0x8f, 0x80, 0x37, 0x8d, 0x23, 0xe0, 0x74, 0x5e,
+ 0x56, 0xa7, 0xdc, 0xdd, 0xbf, 0x9c, 0xd8, 0xfd, 0x67, 0x73, 0x29, 0x74, 0xde, 0xf8, 0x4d, 0x18,
+ 0x61, 0xb9, 0xa2, 0x84, 0x63, 0xe5, 0x4b, 0xc6, 0x86, 0x2f, 0x27, 0x36, 0xfc, 0x84, 0x86, 0xaa,
+ 0xed, 0xf4, 0x67, 0x60, 0x50, 0x78, 0xea, 0x25, 0xa3, 0x41, 0x08, 0x5c, 0x2c, 0xe1, 0xf6, 0xbf,
+ 0x2c, 0x82, 0x91, 0x9b, 0x0a, 0xfd, 0x96, 0x05, 0xb3, 0x01, 0x37, 0xb6, 0xaf, 0x2f, 0xb6, 0x03,
+ 0xd7, 0xdb, 0xac, 0xd6, 0xb6, 0x48, 0xbd, 0xdd, 0x70, 0xbd, 0xcd, 0x95, 0x4d, 0xcf, 0x57, 0xc5,
+ 0x4b, 0xf7, 0x48, 0xad, 0xcd, 0xb4, 0xca, 0x5d, 0x12, 0x61, 0x29, 0x87, 0x95, 0x17, 0xf7, 0xf7,
+ 0xca, 0xb3, 0xf8, 0x50, 0xb4, 0xf1, 0x21, 0xfb, 0x82, 0xfe, 0xd0, 0x82, 0x8b, 0x3c, 0x47, 0x52,
+ 0xef, 0xfd, 0xef, 0x20, 0xe1, 0xa8, 0x48, 0x52, 0x31, 0x91, 0x75, 0x12, 0x34, 0xe7, 0x5f, 0x15,
+ 0x03, 0x7a, 0xb1, 0x72, 0xb8, 0xb6, 0xf0, 0x61, 0x3b, 0x67, 0xff, 0x37, 0x45, 0x18, 0x13, 0x21,
+ 0x3a, 0xc5, 0x1d, 0xf0, 0x8a, 0xb1, 0x24, 0x1e, 0x4f, 0x2c, 0x89, 0x29, 0x03, 0xf9, 0xe1, 0x1c,
+ 0xff, 0x21, 0x4c, 0xd1, 0xc3, 0xf9, 0x2a, 0x71, 0x82, 0xe8, 0x0e, 0x71, 0xb8, 0x09, 0x66, 0xf1,
+ 0xd0, 0xa7, 0xbf, 0x12, 0xac, 0x5f, 0x4f, 0x12, 0xc3, 0x69, 0xfa, 0x3f, 0x4e, 0x77, 0x8e, 0x07,
+ 0x93, 0xa9, 0x28, 0xab, 0xef, 0xc2, 0xb0, 0x72, 0x33, 0x13, 0x87, 0x4e, 0xe7, 0x60, 0xc5, 0x49,
+ 0x0a, 0x5c, 0xe8, 0x19, 0xbb, 0x38, 0xc6, 0xe4, 0xec, 0x5f, 0x2e, 0x18, 0x0d, 0xf2, 0x49, 0x5c,
+ 0x83, 0x21, 0x27, 0x64, 0x01, 0xd4, 0xeb, 0x9d, 0x24, 0xda, 0xa9, 0x66, 0x98, 0x9d, 0xd9, 0x9c,
+ 0xa8, 0x89, 0x15, 0x0d, 0x74, 0x95, 0x1b, 0xba, 0xee, 0x90, 0x4e, 0xe2, 0xec, 0x14, 0x35, 0x90,
+ 0xa6, 0xb0, 0x3b, 0x04, 0x8b, 0xfa, 0xe8, 0x73, 0xdc, 0x12, 0xf9, 0x9a, 0xe7, 0xdf, 0xf5, 0xae,
+ 0xf8, 0xbe, 0x0c, 0xc7, 0xd4, 0x1b, 0xc1, 0x29, 0x69, 0x7f, 0xac, 0xaa, 0x63, 0x93, 0x5a, 0x6f,
+ 0x61, 0xcb, 0xbf, 0x04, 0x2c, 0x27, 0x8c, 0x19, 0xd5, 0x21, 0x44, 0x04, 0x26, 0x44, 0xfc, 0x57,
+ 0x59, 0x26, 0xc6, 0x2e, 0xf3, 0xf9, 0x6d, 0xd6, 0x8e, 0x35, 0x40, 0xd7, 0x4c, 0x12, 0x38, 0x49,
+ 0xd3, 0xde, 0xe2, 0x87, 0xf0, 0x32, 0x71, 0xa2, 0x76, 0x40, 0x42, 0xf4, 0x69, 0x28, 0xa5, 0x5f,
+ 0xc6, 0x42, 0x91, 0x62, 0x31, 0xee, 0xf9, 0xf4, 0xfe, 0x5e, 0xb9, 0x54, 0xcd, 0xc1, 0xc1, 0xb9,
+ 0xb5, 0xed, 0x9f, 0xb7, 0x80, 0xf9, 0xd2, 0x1f, 0x01, 0xe7, 0xf3, 0x29, 0x93, 0xf3, 0x29, 0xe5,
+ 0x4d, 0x67, 0x0e, 0xd3, 0xf3, 0x32, 0x5f, 0xc3, 0x95, 0xc0, 0xbf, 0xb7, 0x2b, 0xac, 0xbe, 0xba,
+ 0x3f, 0xe3, 0xec, 0xaf, 0x5b, 0xc0, 0x12, 0x28, 0x61, 0xfe, 0x6a, 0x97, 0x0a, 0x8e, 0xee, 0x06,
+ 0x0d, 0x9f, 0x86, 0xa1, 0x0d, 0x31, 0xfc, 0x19, 0x42, 0x27, 0xa3, 0xc3, 0x26, 0x6d, 0x39, 0x69,
+ 0xc2, 0x27, 0x56, 0xfc, 0xc3, 0x8a, 0x9a, 0xfd, 0x5f, 0x58, 0x30, 0x93, 0x5f, 0x0d, 0xdd, 0x84,
+ 0x93, 0x01, 0xa9, 0xb5, 0x83, 0x90, 0x6e, 0x09, 0xf1, 0x00, 0x12, 0x1e, 0x60, 0x7c, 0xaa, 0x1f,
+ 0xdb, 0xdf, 0x2b, 0x9f, 0xc4, 0xd9, 0x28, 0x38, 0xaf, 0x2e, 0x7a, 0x0d, 0xc6, 0xdb, 0x21, 0xe7,
+ 0xfc, 0x18, 0xd3, 0x15, 0x8a, 0x28, 0xdd, 0xcc, 0x49, 0xea, 0xa6, 0x01, 0xc1, 0x09, 0x4c, 0xfb,
+ 0xef, 0xf3, 0xe5, 0xa8, 0x2c, 0x5e, 0x9b, 0x30, 0xe5, 0x69, 0xff, 0xe9, 0x0d, 0x28, 0x9f, 0xfa,
+ 0x4f, 0x76, 0xbb, 0xf5, 0xd9, 0x75, 0xa9, 0x79, 0xfb, 0x27, 0xc8, 0xe0, 0x34, 0x65, 0xfb, 0x5f,
+ 0x59, 0x70, 0x52, 0x47, 0xd4, 0x7c, 0xff, 0xba, 0x69, 0x01, 0x17, 0x61, 0xc8, 0x6f, 0x91, 0xc0,
+ 0x89, 0xfc, 0x40, 0x5c, 0x73, 0x17, 0xe4, 0x0a, 0xbd, 0x21, 0xca, 0x0f, 0x44, 0xe2, 0x20, 0x49,
+ 0x5d, 0x96, 0x63, 0x55, 0x13, 0xd9, 0x30, 0xc0, 0x04, 0x88, 0xa1, 0xf0, 0xf2, 0x64, 0x87, 0x16,
+ 0xb3, 0x6c, 0x09, 0xb1, 0x80, 0xd8, 0xdf, 0xb7, 0xf8, 0xfa, 0xd4, 0xbb, 0x8e, 0xde, 0x87, 0xc9,
+ 0xa6, 0x13, 0xd5, 0xb6, 0x96, 0xee, 0xb5, 0x02, 0xae, 0xdc, 0x95, 0xe3, 0xf4, 0x5c, 0xb7, 0x71,
+ 0xd2, 0x3e, 0x32, 0xb6, 0x06, 0x5f, 0x4d, 0x10, 0xc3, 0x29, 0xf2, 0xe8, 0x0e, 0x8c, 0xb0, 0x32,
+ 0xe6, 0x15, 0x1d, 0x76, 0xe2, 0x65, 0xf2, 0x5a, 0x53, 0xc6, 0x41, 0xab, 0x31, 0x1d, 0xac, 0x13,
+ 0xb5, 0xbf, 0x59, 0xe4, 0x87, 0x06, 0x7b, 0x7b, 0x3c, 0x03, 0x83, 0x2d, 0xbf, 0xbe, 0xb0, 0xb2,
+ 0x88, 0xc5, 0x2c, 0xa8, 0x7b, 0xaf, 0xc2, 0x8b, 0xb1, 0x84, 0xa3, 0x0b, 0x30, 0x24, 0x7e, 0x4a,
+ 0x65, 0x3c, 0xdb, 0x23, 0x02, 0x2f, 0xc4, 0x0a, 0x8a, 0x5e, 0x04, 0x68, 0x05, 0xfe, 0x8e, 0x5b,
+ 0x67, 0x51, 0xb0, 0x8a, 0xa6, 0x5d, 0x5f, 0x45, 0x41, 0xb0, 0x86, 0x85, 0x5e, 0x87, 0xb1, 0xb6,
+ 0x17, 0x72, 0xfe, 0x49, 0xcb, 0x35, 0xa0, 0x2c, 0xce, 0x6e, 0xea, 0x40, 0x6c, 0xe2, 0xa2, 0x39,
+ 0x18, 0x88, 0x1c, 0x66, 0xa7, 0xd6, 0x9f, 0xef, 0x31, 0xb0, 0x4e, 0x31, 0xf4, 0xac, 0x7e, 0xb4,
+ 0x02, 0x16, 0x15, 0xd1, 0xbb, 0x32, 0x40, 0x01, 0xbf, 0x89, 0x84, 0xab, 0x4e, 0x6f, 0xb7, 0x96,
+ 0x16, 0x9e, 0x40, 0xb8, 0x00, 0x19, 0xb4, 0xd0, 0x6b, 0x00, 0xe4, 0x5e, 0x44, 0x02, 0xcf, 0x69,
+ 0x28, 0xeb, 0x52, 0xc5, 0xc8, 0x2c, 0xfa, 0x6b, 0x7e, 0x74, 0x33, 0x24, 0x4b, 0x0a, 0x03, 0x6b,
+ 0xd8, 0xf6, 0x4f, 0x8f, 0x00, 0xc4, 0x0f, 0x0d, 0x74, 0x1f, 0x86, 0x6a, 0x4e, 0xcb, 0xa9, 0xf1,
+ 0x94, 0xb5, 0xc5, 0x3c, 0x17, 0xef, 0xb8, 0xc6, 0xec, 0x82, 0x40, 0xe7, 0xca, 0x1b, 0x19, 0xae,
+ 0x7d, 0x48, 0x16, 0x77, 0x55, 0xd8, 0xa8, 0xf6, 0xd0, 0x57, 0x2c, 0x18, 0x11, 0x51, 0xa6, 0xd8,
+ 0x0c, 0x15, 0xf2, 0xf5, 0x6d, 0x5a, 0xfb, 0x73, 0x71, 0x0d, 0xde, 0x85, 0x97, 0xe4, 0x0a, 0xd5,
+ 0x20, 0x5d, 0x7b, 0xa1, 0x37, 0x8c, 0x3e, 0x2e, 0xdf, 0xb6, 0x45, 0x63, 0x28, 0xd5, 0xdb, 0x76,
+ 0x98, 0x5d, 0x35, 0xfa, 0xb3, 0xf6, 0xa6, 0xf1, 0xac, 0xed, 0xcb, 0x77, 0x96, 0x36, 0xf8, 0xed,
+ 0x6e, 0x2f, 0x5a, 0x54, 0xd1, 0xa3, 0xb1, 0xf4, 0xe7, 0x7b, 0xf8, 0x6a, 0x0f, 0xbb, 0x2e, 0x91,
+ 0x58, 0xbe, 0x08, 0x13, 0x75, 0x93, 0x6b, 0x11, 0x2b, 0xf1, 0xe9, 0x3c, 0xba, 0x09, 0x26, 0x27,
+ 0xe6, 0x53, 0x12, 0x00, 0x9c, 0x24, 0x8c, 0x2a, 0x3c, 0x38, 0xcf, 0x8a, 0xb7, 0xe1, 0x0b, 0x77,
+ 0x31, 0x3b, 0x77, 0x2e, 0x77, 0xc3, 0x88, 0x34, 0x29, 0x66, 0xcc, 0x24, 0xac, 0x89, 0xba, 0x58,
+ 0x51, 0x41, 0x6f, 0xc3, 0x00, 0x73, 0xf1, 0x0c, 0x4b, 0x43, 0xf9, 0x6a, 0x0d, 0x33, 0x0a, 0x6d,
+ 0xbc, 0x21, 0xd9, 0xdf, 0x10, 0x0b, 0x0a, 0xe8, 0xaa, 0x74, 0xa0, 0x0e, 0x57, 0xbc, 0x9b, 0x21,
+ 0x61, 0x0e, 0xd4, 0xc3, 0xf3, 0x4f, 0xc6, 0xbe, 0xd1, 0xbc, 0x3c, 0x33, 0xf7, 0xaf, 0x51, 0x93,
+ 0xb2, 0x7d, 0xe2, 0xbf, 0x4c, 0x29, 0x2c, 0x62, 0xe6, 0x65, 0x76, 0xcf, 0x4c, 0x3b, 0x1c, 0x0f,
+ 0xe7, 0x2d, 0x93, 0x04, 0x4e, 0xd2, 0xa4, 0x2c, 0x34, 0xdf, 0xf5, 0xc2, 0xe1, 0xac, 0xdb, 0xd9,
+ 0xc1, 0x25, 0x07, 0xec, 0x36, 0xe2, 0x25, 0x58, 0xd4, 0x47, 0x2e, 0x4c, 0x04, 0x06, 0x7b, 0x21,
+ 0x43, 0xdd, 0x9d, 0xef, 0x8d, 0x89, 0xd1, 0x92, 0x28, 0x98, 0x64, 0x70, 0x92, 0x2e, 0x7a, 0x5b,
+ 0x63, 0x94, 0xc6, 0x3a, 0xbf, 0xfc, 0xbb, 0xb1, 0x46, 0x33, 0xdb, 0x30, 0x66, 0x1c, 0x36, 0x8f,
+ 0x54, 0x05, 0xe9, 0xc1, 0x64, 0xf2, 0x64, 0x79, 0xa4, 0x9a, 0xc7, 0xd7, 0x60, 0x9c, 0x6d, 0x84,
+ 0xbb, 0x4e, 0x4b, 0x1c, 0xc5, 0x17, 0x8c, 0xa3, 0xd8, 0xba, 0x50, 0xe4, 0x03, 0x23, 0x87, 0x20,
+ 0x3e, 0x38, 0xed, 0x7f, 0xd3, 0x2f, 0x2a, 0xab, 0x5d, 0x84, 0x2e, 0xc2, 0xb0, 0xe8, 0x80, 0xca,
+ 0x44, 0xa6, 0x0e, 0x86, 0x55, 0x09, 0xc0, 0x31, 0x0e, 0x4b, 0x40, 0xc7, 0xaa, 0x6b, 0x1e, 0x0a,
+ 0x71, 0x02, 0x3a, 0x05, 0xc1, 0x1a, 0x16, 0x7d, 0xfc, 0xde, 0xf1, 0xfd, 0x48, 0xdd, 0xc1, 0x6a,
+ 0xab, 0xcd, 0xb3, 0x52, 0x2c, 0xa0, 0xf4, 0xee, 0xdd, 0x26, 0x81, 0x47, 0x1a, 0x66, 0x2a, 0x0e,
+ 0x75, 0xf7, 0x5e, 0xd3, 0x81, 0xd8, 0xc4, 0xa5, 0x1c, 0x84, 0x1f, 0xb2, 0xbd, 0x2b, 0x9e, 0xd8,
+ 0xb1, 0xc7, 0x47, 0x95, 0x87, 0xdb, 0x90, 0x70, 0xf4, 0x19, 0x38, 0xa9, 0xc2, 0x5e, 0x8a, 0x95,
+ 0x29, 0x5b, 0x1c, 0x30, 0x24, 0x62, 0x27, 0x17, 0xb2, 0xd1, 0x70, 0x5e, 0x7d, 0xf4, 0x26, 0x8c,
+ 0x8b, 0x67, 0x98, 0xa4, 0x38, 0x68, 0x9a, 0x2f, 0x5e, 0x33, 0xa0, 0x38, 0x81, 0x2d, 0x93, 0x89,
+ 0xb0, 0xf7, 0x89, 0xa4, 0x30, 0x94, 0x4e, 0x26, 0xa2, 0xc3, 0x71, 0xaa, 0x06, 0x9a, 0x83, 0x09,
+ 0xce, 0x76, 0xba, 0xde, 0x26, 0x9f, 0x13, 0xe1, 0x02, 0xab, 0x36, 0xe4, 0x0d, 0x13, 0x8c, 0x93,
+ 0xf8, 0xe8, 0x32, 0x8c, 0x3a, 0x41, 0x6d, 0xcb, 0x8d, 0x48, 0x8d, 0xee, 0x2a, 0x66, 0x41, 0xa8,
+ 0xd9, 0x7f, 0xce, 0x69, 0x30, 0x6c, 0x60, 0xa2, 0xb7, 0xa0, 0x2f, 0xbc, 0xeb, 0xb4, 0xc4, 0xe9,
+ 0x93, 0x7f, 0x94, 0xab, 0x15, 0xcc, 0x4d, 0xbf, 0xe8, 0x7f, 0xcc, 0x6a, 0xda, 0xf7, 0x61, 0x3a,
+ 0x23, 0x3c, 0x10, 0x5d, 0x7a, 0x4e, 0xcb, 0x95, 0xa3, 0x92, 0x70, 0xd3, 0x98, 0xab, 0xac, 0xc8,
+ 0xf1, 0xd0, 0xb0, 0xe8, 0xfa, 0x66, 0x61, 0x84, 0xb4, 0xb4, 0xeb, 0x6a, 0x7d, 0x2f, 0x4b, 0x00,
+ 0x8e, 0x71, 0xec, 0xbf, 0x2d, 0xc0, 0x44, 0x86, 0x7a, 0x90, 0xa5, 0xfe, 0x4e, 0xbc, 0xf3, 0xe2,
+ 0x4c, 0xdf, 0x66, 0x76, 0x9b, 0xc2, 0x21, 0xb2, 0xdb, 0x14, 0xbb, 0x65, 0xb7, 0xe9, 0xfb, 0x20,
+ 0xd9, 0x6d, 0xcc, 0x11, 0xeb, 0xef, 0x69, 0xc4, 0x32, 0x32, 0xe2, 0x0c, 0x1c, 0x32, 0x23, 0x8e,
+ 0x31, 0xe8, 0x83, 0x3d, 0x0c, 0xfa, 0x7f, 0x5c, 0x80, 0xc9, 0xa4, 0x66, 0xf1, 0x08, 0xa4, 0xf3,
+ 0x6f, 0x1b, 0xd2, 0xf9, 0x0b, 0xbd, 0x04, 0x3d, 0xc8, 0x95, 0xd4, 0xe3, 0x84, 0xa4, 0xfe, 0xd9,
+ 0x9e, 0xa8, 0x75, 0x96, 0xda, 0xff, 0xa7, 0x05, 0x38, 0x9e, 0xa9, 0x70, 0x3d, 0x82, 0xb1, 0xb9,
+ 0x61, 0x8c, 0xcd, 0x0b, 0x3d, 0x07, 0x84, 0xc8, 0x1d, 0xa0, 0xdb, 0x89, 0x01, 0xba, 0xd8, 0x3b,
+ 0xc9, 0xce, 0xa3, 0xf4, 0xdd, 0x22, 0x9c, 0xcd, 0xac, 0x17, 0x0b, 0xb7, 0x97, 0x0d, 0xe1, 0xf6,
+ 0x8b, 0x09, 0xe1, 0xb6, 0xdd, 0xb9, 0xf6, 0xc3, 0x91, 0x76, 0x8b, 0xc0, 0x08, 0x2c, 0xbc, 0xcb,
+ 0x03, 0x4a, 0xba, 0x8d, 0xc0, 0x08, 0x8a, 0x10, 0x36, 0xe9, 0xfe, 0x38, 0x49, 0xb8, 0xff, 0x07,
+ 0x0b, 0x4e, 0x65, 0xce, 0xcd, 0x11, 0xc8, 0x19, 0xd7, 0x4c, 0x39, 0xe3, 0x33, 0x3d, 0xaf, 0xd6,
+ 0x1c, 0xc1, 0xe3, 0x57, 0x07, 0x72, 0xbe, 0x85, 0x89, 0x3f, 0x6e, 0xc0, 0x88, 0x53, 0xab, 0x91,
+ 0x30, 0x5c, 0xf5, 0xeb, 0x2a, 0x11, 0xc6, 0x0b, 0xec, 0x71, 0x1a, 0x17, 0x1f, 0xec, 0x95, 0x67,
+ 0x92, 0x24, 0x62, 0x30, 0xd6, 0x29, 0xa0, 0xcf, 0xc1, 0x50, 0x28, 0x73, 0x98, 0xf6, 0x3d, 0x78,
+ 0x0e, 0x53, 0xc6, 0x49, 0x2a, 0xf1, 0x8e, 0x22, 0x89, 0xfe, 0x9e, 0x1e, 0x68, 0xab, 0x83, 0x60,
+ 0x93, 0x77, 0xf2, 0x01, 0xc2, 0x6d, 0x99, 0xee, 0xf0, 0xc5, 0x9e, 0xdc, 0xe1, 0xdf, 0x82, 0xc9,
+ 0x90, 0x07, 0xae, 0x8d, 0x4d, 0x64, 0xf8, 0x5a, 0x64, 0xb1, 0xff, 0xaa, 0x09, 0x18, 0x4e, 0x61,
+ 0xa3, 0x65, 0xd9, 0x2a, 0x33, 0x86, 0xe2, 0xcb, 0xf3, 0x7c, 0xdc, 0xa2, 0x30, 0x88, 0x3a, 0x96,
+ 0x9c, 0x04, 0x36, 0xfc, 0x5a, 0x4d, 0xf4, 0x39, 0x00, 0xba, 0x88, 0x84, 0x08, 0x67, 0x30, 0xff,
+ 0x08, 0xa5, 0x67, 0x4b, 0x3d, 0xd3, 0x03, 0x83, 0x45, 0x34, 0x58, 0x54, 0x44, 0xb0, 0x46, 0x10,
+ 0x39, 0x30, 0x16, 0xff, 0x8b, 0xb3, 0xf3, 0x5f, 0xc8, 0x6d, 0x21, 0x49, 0x9c, 0xa9, 0x37, 0x16,
+ 0x75, 0x12, 0xd8, 0xa4, 0x88, 0x3e, 0x0b, 0xa7, 0x76, 0x72, 0xed, 0x8e, 0x38, 0x2f, 0xc9, 0xd2,
+ 0xed, 0xe7, 0x5b, 0x1b, 0xe5, 0xd7, 0xb7, 0xff, 0x47, 0x80, 0xc7, 0x3a, 0x9c, 0xf4, 0x68, 0xce,
+ 0xb4, 0x19, 0x78, 0x2e, 0x29, 0x57, 0x99, 0xc9, 0xac, 0x6c, 0x08, 0x5a, 0x12, 0x1b, 0xaa, 0xf0,
+ 0x81, 0x37, 0xd4, 0xcf, 0x58, 0xda, 0x33, 0x8b, 0x5b, 0x94, 0x7f, 0xea, 0x90, 0x37, 0xd8, 0x43,
+ 0x14, 0x81, 0x6d, 0x64, 0xc8, 0x91, 0x5e, 0xec, 0xb9, 0x3b, 0xbd, 0x0b, 0x96, 0x7e, 0x25, 0x3b,
+ 0xd4, 0x3f, 0x17, 0x31, 0x5d, 0x39, 0xec, 0xf7, 0x1f, 0x55, 0xd8, 0xff, 0xef, 0x58, 0x70, 0x2a,
+ 0x55, 0xcc, 0xfb, 0x40, 0x42, 0x11, 0x58, 0x70, 0xed, 0x03, 0x77, 0x5e, 0x12, 0xe4, 0xdf, 0x70,
+ 0x55, 0x7c, 0xc3, 0xa9, 0x5c, 0xbc, 0x64, 0xd7, 0xbf, 0xf6, 0xe7, 0xe5, 0x69, 0xd6, 0x80, 0x89,
+ 0x88, 0xf3, 0xbb, 0x8e, 0x5a, 0x70, 0xae, 0xd6, 0x0e, 0x82, 0x78, 0xb1, 0x66, 0x6c, 0x4e, 0xfe,
+ 0x5a, 0x7c, 0x72, 0x7f, 0xaf, 0x7c, 0x6e, 0xa1, 0x0b, 0x2e, 0xee, 0x4a, 0x0d, 0x79, 0x80, 0x9a,
+ 0x29, 0xeb, 0x3e, 0x76, 0x00, 0xe4, 0x48, 0x81, 0xd2, 0xb6, 0x80, 0xdc, 0x4e, 0x37, 0xc3, 0x46,
+ 0x30, 0x83, 0xf2, 0xd1, 0xca, 0x6e, 0x7e, 0x38, 0x79, 0x05, 0x66, 0xae, 0xc3, 0xd9, 0xce, 0x8b,
+ 0xe9, 0x50, 0x21, 0x28, 0xfe, 0xc4, 0x82, 0x33, 0x1d, 0x43, 0xb3, 0xfd, 0x08, 0x3e, 0x16, 0xec,
+ 0x2f, 0x5b, 0xf0, 0x78, 0x66, 0x8d, 0xa4, 0xf3, 0x60, 0x8d, 0x16, 0x6a, 0xc6, 0xb0, 0x71, 0x90,
+ 0x22, 0x09, 0xc0, 0x31, 0x8e, 0x61, 0x2f, 0x5a, 0xe8, 0x6a, 0x2f, 0xfa, 0xbb, 0x16, 0xa4, 0xae,
+ 0xfa, 0x23, 0xe0, 0x3c, 0x57, 0x4c, 0xce, 0xf3, 0xc9, 0x5e, 0x46, 0x33, 0x87, 0xe9, 0xfc, 0xeb,
+ 0x09, 0x38, 0x91, 0xe3, 0x41, 0xbe, 0x03, 0x53, 0x9b, 0x35, 0x62, 0x86, 0x0c, 0xe9, 0x14, 0xfd,
+ 0xaf, 0x63, 0x7c, 0x91, 0xf9, 0xe3, 0xfb, 0x7b, 0xe5, 0xa9, 0x14, 0x0a, 0x4e, 0x37, 0x81, 0xbe,
+ 0x6c, 0xc1, 0x31, 0xe7, 0x6e, 0xb8, 0x44, 0x5f, 0x10, 0x6e, 0x6d, 0xbe, 0xe1, 0xd7, 0xb6, 0x29,
+ 0x63, 0x26, 0xb7, 0xd5, 0xcb, 0x99, 0xa2, 0xf0, 0xdb, 0xd5, 0x14, 0xbe, 0xd1, 0x7c, 0x69, 0x7f,
+ 0xaf, 0x7c, 0x2c, 0x0b, 0x0b, 0x67, 0xb6, 0x85, 0xb0, 0xc8, 0xf5, 0xe6, 0x44, 0x5b, 0x9d, 0x82,
+ 0xda, 0x64, 0xb9, 0xfa, 0x73, 0x96, 0x58, 0x42, 0xb0, 0xa2, 0x83, 0xbe, 0x00, 0xc3, 0x9b, 0x32,
+ 0x7e, 0x45, 0x06, 0xcb, 0x1d, 0x0f, 0x64, 0xe7, 0xa8, 0x1e, 0xdc, 0x00, 0x47, 0x21, 0xe1, 0x98,
+ 0x28, 0x7a, 0x13, 0x8a, 0xde, 0x46, 0xd8, 0x29, 0x98, 0x73, 0xc2, 0xd2, 0x9a, 0x47, 0xbb, 0x5a,
+ 0x5b, 0xae, 0x62, 0x5a, 0x11, 0x5d, 0x85, 0x62, 0x70, 0xa7, 0x2e, 0xf4, 0x38, 0x99, 0x9b, 0x14,
+ 0xcf, 0x2f, 0xe6, 0xf4, 0x8a, 0x51, 0xc2, 0xf3, 0x8b, 0x98, 0x92, 0x40, 0x15, 0xe8, 0x67, 0x6e,
+ 0xd7, 0x82, 0xb5, 0xcd, 0x7c, 0xca, 0x77, 0x08, 0x5f, 0xc0, 0xfd, 0x21, 0x19, 0x02, 0xe6, 0x84,
+ 0xd0, 0x3a, 0x0c, 0xd4, 0x5c, 0xaf, 0x4e, 0x02, 0xc1, 0xcb, 0x7e, 0x3c, 0x53, 0x63, 0xc3, 0x30,
+ 0x72, 0x68, 0x72, 0x05, 0x06, 0xc3, 0xc0, 0x82, 0x16, 0xa3, 0x4a, 0x5a, 0x5b, 0x1b, 0xf2, 0xc6,
+ 0xca, 0xa6, 0x4a, 0x5a, 0x5b, 0xcb, 0xd5, 0x8e, 0x54, 0x19, 0x06, 0x16, 0xb4, 0xd0, 0x6b, 0x50,
+ 0xd8, 0xa8, 0x09, 0x97, 0xea, 0x4c, 0xf1, 0xa6, 0x19, 0xb0, 0x6c, 0x7e, 0x60, 0x7f, 0xaf, 0x5c,
+ 0x58, 0x5e, 0xc0, 0x85, 0x8d, 0x1a, 0x5a, 0x83, 0xc1, 0x0d, 0x1e, 0x2f, 0x48, 0xc8, 0x47, 0x9f,
+ 0xce, 0x0e, 0x65, 0x94, 0x0a, 0x29, 0xc4, 0x7d, 0x5b, 0x05, 0x00, 0x4b, 0x22, 0x2c, 0xf5, 0x98,
+ 0x8a, 0x7b, 0x24, 0x22, 0xc5, 0xce, 0x1e, 0x2e, 0x56, 0x95, 0x08, 0xf9, 0xad, 0xa8, 0x60, 0x8d,
+ 0x22, 0x5d, 0xd5, 0xce, 0xfd, 0x76, 0xc0, 0x72, 0x93, 0x08, 0xc5, 0x4c, 0xe6, 0xaa, 0x9e, 0x93,
+ 0x48, 0x9d, 0x56, 0xb5, 0x42, 0xc2, 0x31, 0x51, 0xb4, 0x0d, 0x63, 0x3b, 0x61, 0x6b, 0x8b, 0xc8,
+ 0x2d, 0xcd, 0x22, 0x0c, 0xe6, 0x70, 0xb3, 0xb7, 0x04, 0xa2, 0x1b, 0x44, 0x6d, 0xa7, 0x91, 0x3a,
+ 0x85, 0xd8, 0xb3, 0xe6, 0x96, 0x4e, 0x0c, 0x9b, 0xb4, 0xe9, 0xf0, 0xbf, 0xdf, 0xf6, 0xef, 0xec,
+ 0x46, 0x44, 0x04, 0x78, 0xcd, 0x1c, 0xfe, 0x77, 0x38, 0x4a, 0x7a, 0xf8, 0x05, 0x00, 0x4b, 0x22,
+ 0xe8, 0x96, 0x18, 0x1e, 0x76, 0x7a, 0x4e, 0xe6, 0x87, 0xa4, 0x9f, 0x93, 0x48, 0x39, 0x83, 0xc2,
+ 0x4e, 0xcb, 0x98, 0x14, 0x3b, 0x25, 0x5b, 0x5b, 0x7e, 0xe4, 0x7b, 0x89, 0x13, 0x7a, 0x2a, 0xff,
+ 0x94, 0xac, 0x64, 0xe0, 0xa7, 0x4f, 0xc9, 0x2c, 0x2c, 0x9c, 0xd9, 0x16, 0xaa, 0xc3, 0x78, 0xcb,
+ 0x0f, 0xa2, 0xbb, 0x7e, 0x20, 0xd7, 0x17, 0xea, 0x20, 0x28, 0x35, 0x30, 0x45, 0x8b, 0xcc, 0x2c,
+ 0xc8, 0x84, 0xe0, 0x04, 0x4d, 0xf4, 0x69, 0x18, 0x0c, 0x6b, 0x4e, 0x83, 0xac, 0xdc, 0x28, 0x4d,
+ 0xe7, 0x5f, 0x3f, 0x55, 0x8e, 0x92, 0xb3, 0xba, 0x78, 0xb8, 0x27, 0x8e, 0x82, 0x25, 0x39, 0xb4,
+ 0x0c, 0xfd, 0x2c, 0xa5, 0x37, 0x8b, 0x46, 0x9c, 0x13, 0x59, 0x3f, 0xe5, 0xd4, 0xc3, 0xcf, 0x26,
+ 0x56, 0x8c, 0x79, 0x75, 0xba, 0x07, 0x84, 0xa4, 0xc0, 0x0f, 0x4b, 0xc7, 0xf3, 0xf7, 0x80, 0x10,
+ 0x30, 0xdc, 0xa8, 0x76, 0xda, 0x03, 0x0a, 0x09, 0xc7, 0x44, 0xe9, 0xc9, 0x4c, 0x4f, 0xd3, 0x13,
+ 0x1d, 0x0c, 0x36, 0x73, 0xcf, 0x52, 0x76, 0x32, 0xd3, 0x93, 0x94, 0x92, 0xb0, 0x7f, 0x7b, 0x28,
+ 0xcd, 0xb3, 0x30, 0x09, 0xd3, 0x7f, 0x68, 0xa5, 0x2c, 0x36, 0x3e, 0xd1, 0xab, 0xc0, 0xfb, 0x21,
+ 0x3e, 0x5c, 0xbf, 0x6c, 0xc1, 0x89, 0x56, 0xe6, 0x87, 0x08, 0x06, 0xa0, 0x37, 0xb9, 0x39, 0xff,
+ 0x74, 0x15, 0xb9, 0x3a, 0x1b, 0x8e, 0x73, 0x5a, 0x4a, 0x0a, 0x07, 0x8a, 0x1f, 0x58, 0x38, 0xb0,
+ 0x0a, 0x43, 0x35, 0xfe, 0x92, 0x93, 0x69, 0x1c, 0x7a, 0x8a, 0xbb, 0xca, 0xf5, 0xb4, 0xa2, 0x22,
+ 0x56, 0x24, 0xd0, 0xcf, 0x5a, 0x70, 0x26, 0xd9, 0x75, 0x4c, 0x18, 0x58, 0x98, 0x6b, 0x72, 0xb1,
+ 0xd6, 0xb2, 0xf8, 0xfe, 0x14, 0xff, 0x6f, 0x20, 0x1f, 0x74, 0x43, 0xc0, 0x9d, 0x1b, 0x43, 0x8b,
+ 0x19, 0x72, 0xb5, 0x01, 0x53, 0x27, 0xd9, 0x83, 0x6c, 0xed, 0x65, 0x18, 0x6d, 0xfa, 0x6d, 0x2f,
+ 0x12, 0x56, 0x97, 0xc2, 0x74, 0x8b, 0x99, 0x2c, 0xad, 0x6a, 0xe5, 0xd8, 0xc0, 0x4a, 0x48, 0xe4,
+ 0x86, 0x1e, 0x58, 0x22, 0xf7, 0x1e, 0x8c, 0x7a, 0x9a, 0x43, 0x42, 0xa7, 0x17, 0xac, 0x90, 0x2e,
+ 0x6a, 0xd8, 0xbc, 0x97, 0x7a, 0x09, 0x36, 0xa8, 0x75, 0x96, 0x96, 0xc1, 0x07, 0x93, 0x96, 0x1d,
+ 0xe9, 0x93, 0xd8, 0xfe, 0xa5, 0x42, 0xc6, 0x8b, 0x81, 0x4b, 0xe5, 0xde, 0x30, 0xa5, 0x72, 0xe7,
+ 0x93, 0x52, 0xb9, 0x94, 0xaa, 0xca, 0x10, 0xc8, 0xf5, 0x9e, 0x4b, 0xb4, 0xe7, 0x58, 0xda, 0xff,
+ 0xc0, 0x82, 0x93, 0x4c, 0xf7, 0x41, 0x1b, 0xf8, 0xc0, 0xfa, 0x0e, 0x66, 0x10, 0x7b, 0x3d, 0x9b,
+ 0x1c, 0xce, 0x6b, 0xc7, 0x6e, 0xc0, 0xb9, 0x6e, 0xf7, 0x2e, 0xb3, 0x2f, 0xae, 0x2b, 0xf3, 0x8a,
+ 0xd8, 0xbe, 0xb8, 0xbe, 0xb2, 0x88, 0x19, 0xa4, 0xd7, 0xb0, 0x8b, 0xf6, 0xff, 0x69, 0x41, 0xb1,
+ 0xe2, 0xd7, 0x8f, 0xe0, 0x45, 0xff, 0x29, 0xe3, 0x45, 0xff, 0x58, 0xf6, 0x8d, 0x5f, 0xcf, 0x55,
+ 0xf6, 0x2d, 0x25, 0x94, 0x7d, 0x67, 0xf2, 0x08, 0x74, 0x56, 0xed, 0xfd, 0xeb, 0x22, 0x8c, 0x54,
+ 0xfc, 0xba, 0xda, 0x67, 0xff, 0xdd, 0x83, 0xb8, 0x11, 0xe5, 0x66, 0x0f, 0xd3, 0x28, 0x33, 0x7b,
+ 0x62, 0x19, 0xf5, 0xe2, 0x47, 0xcc, 0x9b, 0xe8, 0x36, 0x71, 0x37, 0xb7, 0x22, 0x52, 0x4f, 0x7e,
+ 0xce, 0xd1, 0x79, 0x13, 0x7d, 0xaf, 0x08, 0x13, 0x89, 0xd6, 0x51, 0x03, 0xc6, 0x1a, 0xba, 0x2a,
+ 0x49, 0xac, 0xd3, 0x07, 0xd2, 0x42, 0x09, 0x6f, 0x0c, 0xad, 0x08, 0x9b, 0xc4, 0xd1, 0x2c, 0x80,
+ 0xa7, 0xdb, 0xa4, 0xab, 0x98, 0xd0, 0x9a, 0x3d, 0xba, 0x86, 0x81, 0x5e, 0x81, 0x91, 0xc8, 0x6f,
+ 0xf9, 0x0d, 0x7f, 0x73, 0xf7, 0x9a, 0x8a, 0x8f, 0xac, 0x4c, 0x96, 0xd7, 0x63, 0x10, 0xd6, 0xf1,
+ 0xd0, 0x3d, 0x98, 0x52, 0x44, 0xaa, 0x0f, 0x41, 0xbd, 0xc6, 0xc4, 0x26, 0x6b, 0x49, 0x8a, 0x38,
+ 0xdd, 0x08, 0x7a, 0x0d, 0xc6, 0x99, 0xed, 0x34, 0xab, 0x7f, 0x8d, 0xec, 0xca, 0xe0, 0xd2, 0x8c,
+ 0xc3, 0x5e, 0x35, 0x20, 0x38, 0x81, 0x89, 0x16, 0x60, 0xaa, 0xe9, 0x86, 0x89, 0xea, 0x03, 0xac,
+ 0x3a, 0xeb, 0xc0, 0x6a, 0x12, 0x88, 0xd3, 0xf8, 0xf6, 0x2f, 0x88, 0x39, 0xf6, 0x22, 0xf7, 0xa3,
+ 0xed, 0xf8, 0xe1, 0xde, 0x8e, 0xdf, 0xb5, 0x60, 0x92, 0xb6, 0xce, 0x0c, 0x42, 0x25, 0x23, 0xa5,
+ 0xd2, 0x8f, 0x58, 0x1d, 0xd2, 0x8f, 0x9c, 0xa7, 0xc7, 0x76, 0xdd, 0x6f, 0x47, 0x42, 0x3a, 0xaa,
+ 0x9d, 0xcb, 0xb4, 0x14, 0x0b, 0xa8, 0xc0, 0x23, 0x41, 0x20, 0xbc, 0xee, 0x75, 0x3c, 0x12, 0x04,
+ 0x58, 0x40, 0x65, 0x76, 0x92, 0xbe, 0xec, 0xec, 0x24, 0x3c, 0xc8, 0xbc, 0xb0, 0xa3, 0x13, 0x2c,
+ 0xad, 0x16, 0x64, 0x5e, 0x1a, 0xd8, 0xc5, 0x38, 0xf6, 0x3f, 0x2b, 0x42, 0xa9, 0xe2, 0xd7, 0x17,
+ 0x48, 0x10, 0xb9, 0x1b, 0x6e, 0xcd, 0x89, 0x88, 0x96, 0xf9, 0xf6, 0x45, 0x00, 0xe6, 0x44, 0x16,
+ 0x64, 0x45, 0x50, 0xaf, 0x2a, 0x08, 0xd6, 0xb0, 0x28, 0x57, 0xb2, 0x4d, 0x76, 0xb5, 0x9b, 0x57,
+ 0x71, 0x25, 0xd7, 0x78, 0x31, 0x96, 0x70, 0x74, 0x9d, 0x85, 0x32, 0x5a, 0xba, 0xd7, 0x72, 0x03,
+ 0x9e, 0x23, 0x9c, 0xd4, 0x7c, 0xaf, 0x1e, 0x8a, 0xc0, 0x6f, 0x25, 0x11, 0x88, 0x28, 0x05, 0xc7,
+ 0x99, 0xb5, 0x50, 0x05, 0x8e, 0xd5, 0x02, 0x52, 0x27, 0x5e, 0xe4, 0x3a, 0x8d, 0xf9, 0xb6, 0x57,
+ 0x6f, 0xf0, 0x94, 0x3c, 0x7d, 0x46, 0x2e, 0xcf, 0x63, 0x0b, 0x19, 0x38, 0x38, 0xb3, 0xa6, 0xf8,
+ 0x14, 0x46, 0xa4, 0x3f, 0xf5, 0x29, 0xac, 0x9e, 0x84, 0xb3, 0xc6, 0xe3, 0x21, 0x5c, 0xd8, 0x72,
+ 0x5c, 0x8f, 0xd5, 0x1b, 0x48, 0x34, 0x9e, 0x81, 0x83, 0x33, 0x6b, 0xda, 0x3f, 0x28, 0xc2, 0x28,
+ 0x9d, 0x18, 0x65, 0x71, 0xf3, 0xb2, 0x61, 0x71, 0x73, 0x2e, 0x61, 0x71, 0x33, 0xa9, 0xe3, 0x6a,
+ 0xf6, 0x35, 0x6f, 0x03, 0xf2, 0x45, 0x52, 0x82, 0x2b, 0xc4, 0x23, 0x7c, 0xc8, 0x98, 0x90, 0xb1,
+ 0x18, 0xdb, 0xa3, 0xdc, 0x48, 0x61, 0xe0, 0x8c, 0x5a, 0x1f, 0xd9, 0xea, 0x1c, 0xad, 0xad, 0xce,
+ 0xef, 0x58, 0x6c, 0x05, 0x2c, 0xae, 0x55, 0xb9, 0x11, 0x39, 0xba, 0x04, 0x23, 0xec, 0x1a, 0x63,
+ 0xb1, 0x3c, 0xa4, 0x49, 0x0b, 0xcb, 0x33, 0xbb, 0x16, 0x17, 0x63, 0x1d, 0x07, 0x5d, 0x80, 0xa1,
+ 0x90, 0x38, 0x41, 0x6d, 0x4b, 0xdd, 0xe1, 0xc2, 0xfe, 0x84, 0x97, 0x61, 0x05, 0x45, 0xef, 0xc4,
+ 0x11, 0xe1, 0x8b, 0xf9, 0x16, 0xe9, 0x7a, 0x7f, 0xf8, 0x39, 0x98, 0x1f, 0x06, 0xde, 0xbe, 0x0d,
+ 0x28, 0x8d, 0xdf, 0x83, 0x8b, 0x5f, 0xd9, 0x8c, 0x59, 0x3c, 0x9c, 0x8a, 0x57, 0xfc, 0x77, 0x16,
+ 0x8c, 0x57, 0xfc, 0x3a, 0x3d, 0x9f, 0x7f, 0x9c, 0x0e, 0x63, 0x3d, 0x83, 0xc7, 0x40, 0x87, 0x0c,
+ 0x1e, 0x4f, 0x40, 0x7f, 0xc5, 0xaf, 0x77, 0x89, 0xab, 0xfc, 0x6f, 0x2c, 0x18, 0xac, 0xf8, 0xf5,
+ 0x23, 0xd0, 0xae, 0xbd, 0x61, 0x6a, 0xd7, 0x4e, 0xe6, 0xac, 0x9b, 0x1c, 0x85, 0xda, 0xef, 0xf7,
+ 0xc1, 0x18, 0xed, 0xa7, 0xbf, 0x29, 0xa7, 0xd2, 0x18, 0x36, 0xab, 0x87, 0x61, 0xa3, 0x6f, 0x3d,
+ 0xbf, 0xd1, 0xf0, 0xef, 0x26, 0xa7, 0x75, 0x99, 0x95, 0x62, 0x01, 0x45, 0xcf, 0xc3, 0x50, 0x2b,
+ 0x20, 0x3b, 0xae, 0x2f, 0x1e, 0x51, 0x9a, 0xae, 0xb2, 0x22, 0xca, 0xb1, 0xc2, 0x40, 0x2f, 0xc3,
+ 0x68, 0xe8, 0x7a, 0x94, 0x61, 0xe4, 0xb7, 0x52, 0x1f, 0x3b, 0x33, 0x79, 0x9a, 0x39, 0xad, 0x1c,
+ 0x1b, 0x58, 0xe8, 0x36, 0x0c, 0xb3, 0xff, 0xec, 0xd8, 0xe9, 0x3f, 0xf4, 0xb1, 0x23, 0xb2, 0x79,
+ 0x0b, 0x02, 0x38, 0xa6, 0x45, 0xef, 0xe2, 0x48, 0xa6, 0x6a, 0x0a, 0x45, 0x7c, 0x5d, 0x75, 0x17,
+ 0xab, 0x24, 0x4e, 0x21, 0xd6, 0xb0, 0xd0, 0x73, 0x30, 0x1c, 0x39, 0x6e, 0xe3, 0xba, 0xeb, 0x31,
+ 0x23, 0x0d, 0xda, 0x7f, 0x91, 0x54, 0x5b, 0x14, 0xe2, 0x18, 0x4e, 0x19, 0x7e, 0x16, 0x76, 0x6c,
+ 0x7e, 0x37, 0x12, 0xa9, 0x1e, 0x8b, 0x9c, 0xe1, 0xbf, 0xae, 0x4a, 0xb1, 0x86, 0x81, 0xb6, 0xe0,
+ 0xb4, 0xeb, 0xb1, 0x94, 0x6c, 0xa4, 0xba, 0xed, 0xb6, 0xd6, 0xaf, 0x57, 0x6f, 0x91, 0xc0, 0xdd,
+ 0xd8, 0x9d, 0x77, 0x6a, 0xdb, 0xc4, 0xab, 0x33, 0x79, 0xd0, 0xd0, 0xfc, 0x93, 0xa2, 0x8b, 0xa7,
+ 0x57, 0x3a, 0xe0, 0xe2, 0x8e, 0x94, 0x90, 0x4d, 0xb7, 0x63, 0x40, 0x9c, 0xa6, 0x10, 0xfc, 0xf0,
+ 0x74, 0x4e, 0xac, 0x04, 0x0b, 0x88, 0xfd, 0x12, 0xdb, 0x13, 0x37, 0xaa, 0xe8, 0x59, 0xe3, 0x78,
+ 0x39, 0xa1, 0x1f, 0x2f, 0x07, 0x7b, 0xe5, 0x81, 0x1b, 0x55, 0x2d, 0x04, 0xd5, 0x65, 0x38, 0x5e,
+ 0xf1, 0xeb, 0x15, 0x3f, 0x88, 0x96, 0xfd, 0xe0, 0xae, 0x13, 0xd4, 0xe5, 0x12, 0x2c, 0xcb, 0x20,
+ 0x5c, 0xf4, 0x8c, 0xed, 0xe7, 0x27, 0x90, 0x11, 0x60, 0xeb, 0x25, 0xc6, 0xba, 0x1f, 0xd2, 0xe7,
+ 0xb9, 0xc6, 0x98, 0x48, 0x95, 0xf8, 0xf0, 0x8a, 0x13, 0x11, 0x74, 0x03, 0xc6, 0x6a, 0xfa, 0xb5,
+ 0x2d, 0xaa, 0x3f, 0x23, 0x2f, 0x3b, 0xe3, 0x4e, 0xcf, 0xbc, 0xe7, 0xcd, 0xfa, 0xf6, 0x77, 0x2c,
+ 0xd1, 0x0a, 0x17, 0x49, 0x71, 0xe3, 0xe6, 0xee, 0x67, 0xee, 0x02, 0x4c, 0x05, 0x7a, 0x15, 0xcd,
+ 0x48, 0xf0, 0x38, 0xcf, 0x24, 0x95, 0x00, 0xe2, 0x34, 0x3e, 0xfa, 0x2c, 0x9c, 0x32, 0x0a, 0xa5,
+ 0xbd, 0x84, 0x96, 0x24, 0x9e, 0x09, 0xed, 0x70, 0x1e, 0x12, 0xce, 0xaf, 0x6f, 0xff, 0x24, 0x9c,
+ 0x48, 0x7e, 0x97, 0x10, 0xa3, 0x3d, 0xe0, 0xd7, 0x15, 0x0e, 0xf7, 0x75, 0xf6, 0x2b, 0x30, 0x55,
+ 0xf1, 0xb5, 0x00, 0x23, 0x6c, 0xfe, 0xba, 0xc7, 0x39, 0xfb, 0xe5, 0x21, 0x76, 0x0d, 0x26, 0xb2,
+ 0x19, 0xa2, 0xcf, 0xc3, 0x78, 0x48, 0x58, 0x70, 0x3f, 0x29, 0xbe, 0xed, 0x10, 0xb0, 0xa0, 0xba,
+ 0xa4, 0x63, 0xf2, 0x27, 0xaa, 0x59, 0x86, 0x13, 0xd4, 0x50, 0x13, 0xc6, 0xef, 0xba, 0x5e, 0xdd,
+ 0xbf, 0x1b, 0x4a, 0xfa, 0x43, 0xf9, 0xba, 0xa0, 0xdb, 0x1c, 0x33, 0xd1, 0x47, 0xa3, 0xb9, 0xdb,
+ 0x06, 0x31, 0x9c, 0x20, 0x4e, 0x8f, 0x9a, 0xa0, 0xed, 0xcd, 0x85, 0x37, 0x43, 0x12, 0x88, 0xd0,
+ 0x83, 0xec, 0xa8, 0xc1, 0xb2, 0x10, 0xc7, 0x70, 0x7a, 0xd4, 0xb0, 0x3f, 0x2c, 0xe2, 0x01, 0x3b,
+ 0xcb, 0xc4, 0x51, 0x83, 0x55, 0x29, 0xd6, 0x30, 0xe8, 0x51, 0xcc, 0xfe, 0xad, 0xf9, 0x1e, 0xf6,
+ 0xfd, 0x48, 0x1e, 0xde, 0x2c, 0xf5, 0xab, 0x56, 0x8e, 0x0d, 0xac, 0x9c, 0x40, 0x87, 0x7d, 0x87,
+ 0x0d, 0x74, 0x88, 0xa2, 0x0e, 0x41, 0x1e, 0x78, 0xa8, 0xee, 0xcb, 0x9d, 0x82, 0x3c, 0x1c, 0x3c,
+ 0x50, 0x00, 0x08, 0xca, 0x0b, 0x6c, 0x88, 0x01, 0xea, 0xe7, 0x91, 0x1c, 0x99, 0xb6, 0xba, 0xca,
+ 0x47, 0x47, 0xc2, 0xd0, 0x12, 0x0c, 0x86, 0xbb, 0x61, 0x2d, 0x6a, 0x84, 0x9d, 0xd2, 0xfb, 0x56,
+ 0x19, 0x8a, 0x96, 0xb2, 0x9e, 0x57, 0xc1, 0xb2, 0x2e, 0xaa, 0xc1, 0xb4, 0xa0, 0xb8, 0xb0, 0xe5,
+ 0x78, 0x2a, 0xe9, 0x28, 0x37, 0x4b, 0xbd, 0xb4, 0xbf, 0x57, 0x9e, 0x16, 0x2d, 0xeb, 0xe0, 0x83,
+ 0xbd, 0x32, 0xdd, 0x92, 0x19, 0x10, 0x9c, 0x45, 0x8d, 0x2f, 0xf9, 0x5a, 0xcd, 0x6f, 0xb6, 0x2a,
+ 0x81, 0xbf, 0xe1, 0x36, 0x48, 0x27, 0x8d, 0x7f, 0xd5, 0xc0, 0x14, 0x4b, 0xde, 0x28, 0xc3, 0x09,
+ 0x6a, 0xe8, 0x0e, 0x4c, 0x38, 0xad, 0xd6, 0x5c, 0xd0, 0xf4, 0x03, 0xd9, 0xc0, 0x48, 0xbe, 0xea,
+ 0x68, 0xce, 0x44, 0xe5, 0x39, 0x47, 0x13, 0x85, 0x38, 0x49, 0x90, 0x0e, 0x94, 0xd8, 0x68, 0xc6,
+ 0x40, 0x8d, 0xc5, 0x03, 0x25, 0xf6, 0x65, 0xc6, 0x40, 0x65, 0x40, 0x70, 0x16, 0x35, 0xfb, 0xef,
+ 0x33, 0xc6, 0x9f, 0x05, 0x02, 0x67, 0xfe, 0x5f, 0x4d, 0x18, 0x6b, 0xb1, 0x63, 0x5f, 0xe4, 0x03,
+ 0x14, 0x47, 0xc5, 0xcb, 0x3d, 0x4a, 0xa8, 0xef, 0xb2, 0x8c, 0xc6, 0x86, 0xa5, 0x72, 0x45, 0x27,
+ 0x87, 0x4d, 0xea, 0xf6, 0x5f, 0xcc, 0x30, 0xd6, 0xb1, 0xca, 0xc5, 0xce, 0x83, 0xc2, 0x1b, 0x56,
+ 0x08, 0x9a, 0x66, 0xf2, 0x15, 0x3c, 0xf1, 0xfa, 0x12, 0x1e, 0xb5, 0x58, 0xd6, 0x45, 0x9f, 0x83,
+ 0x71, 0xd7, 0x73, 0xe3, 0x4c, 0xe0, 0x61, 0xe9, 0x58, 0x7e, 0x98, 0x35, 0x85, 0xa5, 0xe7, 0x0a,
+ 0xd5, 0x2b, 0xe3, 0x04, 0x31, 0xf4, 0x0e, 0x33, 0xde, 0x95, 0xa4, 0x0b, 0xbd, 0x90, 0xd6, 0xed,
+ 0x74, 0x25, 0x59, 0x8d, 0x08, 0x6a, 0xc3, 0x74, 0x3a, 0x23, 0x7a, 0x58, 0xb2, 0xf3, 0xdf, 0x46,
+ 0xe9, 0xa4, 0xe6, 0x71, 0x52, 0xc7, 0x34, 0x2c, 0xc4, 0x59, 0xf4, 0xd1, 0xf5, 0x64, 0xbe, 0xea,
+ 0xa2, 0xa1, 0x1a, 0x4a, 0xe5, 0xac, 0x1e, 0xeb, 0x98, 0xaa, 0x7a, 0x13, 0xce, 0x68, 0x29, 0x7f,
+ 0xaf, 0x04, 0x0e, 0x33, 0x1e, 0x73, 0xd9, 0x6d, 0xa4, 0x31, 0xb5, 0x8f, 0xef, 0xef, 0x95, 0xcf,
+ 0xac, 0x77, 0x42, 0xc4, 0x9d, 0xe9, 0xa0, 0x1b, 0x70, 0x9c, 0x07, 0x09, 0x5a, 0x24, 0x4e, 0xbd,
+ 0xe1, 0x7a, 0x8a, 0x6b, 0xe6, 0x67, 0xd7, 0xa9, 0xfd, 0xbd, 0xf2, 0xf1, 0xb9, 0x2c, 0x04, 0x9c,
+ 0x5d, 0x0f, 0xbd, 0x01, 0xc3, 0x75, 0x4f, 0x9e, 0xb2, 0x03, 0x46, 0x56, 0xe5, 0xe1, 0xc5, 0xb5,
+ 0xaa, 0xfa, 0xfe, 0xf8, 0x0f, 0x8e, 0x2b, 0xa0, 0x4d, 0xae, 0x9b, 0x54, 0x02, 0xe5, 0xc1, 0x54,
+ 0xec, 0xd8, 0xa4, 0xce, 0xc5, 0x88, 0xba, 0xc1, 0x95, 0xf2, 0xca, 0x33, 0xd3, 0x08, 0xc8, 0x61,
+ 0x10, 0x46, 0x6f, 0x03, 0x12, 0xa9, 0xb0, 0xe6, 0x6a, 0x2c, 0xd9, 0xa4, 0x66, 0x30, 0xac, 0x44,
+ 0x08, 0xd5, 0x14, 0x06, 0xce, 0xa8, 0x85, 0xae, 0xd2, 0xe3, 0x51, 0x2f, 0x15, 0xc7, 0xaf, 0xca,
+ 0xdd, 0xbf, 0x48, 0x5a, 0x01, 0x61, 0x36, 0xae, 0x26, 0x45, 0x9c, 0xa8, 0x87, 0xea, 0x70, 0xda,
+ 0x69, 0x47, 0x3e, 0x53, 0xfb, 0x9a, 0xa8, 0xeb, 0xfe, 0x36, 0xf1, 0x98, 0xc5, 0xc5, 0x10, 0x8b,
+ 0x49, 0x7b, 0x7a, 0xae, 0x03, 0x1e, 0xee, 0x48, 0x85, 0x3e, 0xa7, 0xe8, 0x58, 0x68, 0x1a, 0x59,
+ 0x23, 0x80, 0x00, 0x37, 0x53, 0x90, 0x18, 0xe8, 0x15, 0x18, 0xd9, 0xf2, 0xc3, 0x68, 0x8d, 0x44,
+ 0x77, 0xfd, 0x60, 0x5b, 0xe4, 0xde, 0x88, 0xf3, 0x1d, 0xc5, 0x20, 0xac, 0xe3, 0xa1, 0x67, 0x60,
+ 0x90, 0xd9, 0x03, 0xae, 0x2c, 0xb2, 0xbb, 0x76, 0x28, 0x3e, 0x63, 0xae, 0xf2, 0x62, 0x2c, 0xe1,
+ 0x12, 0x75, 0xa5, 0xb2, 0xc0, 0x8e, 0xe3, 0x04, 0xea, 0x4a, 0x65, 0x01, 0x4b, 0x38, 0x5d, 0xae,
+ 0xe1, 0x96, 0x13, 0x90, 0x4a, 0xe0, 0xd7, 0x48, 0xa8, 0x65, 0xd9, 0x7a, 0x8c, 0x67, 0x16, 0xa1,
+ 0xcb, 0xb5, 0x9a, 0x85, 0x80, 0xb3, 0xeb, 0x21, 0x92, 0x4e, 0x77, 0x3d, 0x9e, 0xaf, 0x0f, 0x4f,
+ 0xb3, 0x83, 0x3d, 0x66, 0xbc, 0xf6, 0x60, 0x52, 0x25, 0xda, 0xe6, 0xb9, 0x44, 0xc2, 0xd2, 0x04,
+ 0x5b, 0xdb, 0xbd, 0x27, 0x22, 0x51, 0x16, 0x06, 0x2b, 0x09, 0x4a, 0x38, 0x45, 0xdb, 0x08, 0x7a,
+ 0x3c, 0xd9, 0x35, 0xe8, 0xf1, 0x45, 0x18, 0x0e, 0xdb, 0x77, 0xea, 0x7e, 0xd3, 0x71, 0x3d, 0x66,
+ 0x56, 0xa5, 0x3d, 0xdc, 0xab, 0x12, 0x80, 0x63, 0x1c, 0xb4, 0x0c, 0x43, 0x8e, 0x34, 0x1f, 0x40,
+ 0xf9, 0xf1, 0x1c, 0x95, 0xd1, 0x00, 0x0f, 0x71, 0x26, 0x0d, 0x06, 0x54, 0x5d, 0xf4, 0x3a, 0x8c,
+ 0x89, 0x98, 0x31, 0x42, 0x54, 0x3d, 0x6d, 0x7a, 0xb9, 0x57, 0x75, 0x20, 0x36, 0x71, 0xd1, 0x4d,
+ 0x18, 0x89, 0xfc, 0x86, 0x90, 0x71, 0x86, 0xa5, 0x13, 0xf9, 0x61, 0x97, 0xd7, 0x15, 0x9a, 0xae,
+ 0xd8, 0x52, 0x55, 0xb1, 0x4e, 0x07, 0xad, 0xf3, 0xf5, 0xce, 0x72, 0x6a, 0x91, 0xb0, 0x74, 0x32,
+ 0xff, 0x4e, 0x52, 0xa9, 0xb7, 0xcc, 0xed, 0x20, 0x6a, 0x62, 0x9d, 0x0c, 0xba, 0x02, 0x53, 0xad,
+ 0xc0, 0xf5, 0xd9, 0x9a, 0x50, 0xe6, 0x10, 0x25, 0x33, 0xe9, 0x6f, 0x25, 0x89, 0x80, 0xd3, 0x75,
+ 0x58, 0xc8, 0x1f, 0x51, 0x58, 0x3a, 0xc5, 0xb3, 0x00, 0x72, 0x39, 0x08, 0x2f, 0xc3, 0x0a, 0x8a,
+ 0x56, 0xd9, 0x49, 0xcc, 0x45, 0x78, 0xa5, 0x99, 0xfc, 0x40, 0x12, 0xba, 0xa8, 0x8f, 0xf3, 0xfe,
+ 0xea, 0x2f, 0x8e, 0x29, 0xa0, 0x3a, 0xc4, 0xa9, 0xfe, 0xe9, 0x0b, 0x2a, 0x2c, 0x9d, 0xee, 0x60,
+ 0x94, 0x9d, 0x78, 0x2e, 0xc7, 0x0c, 0x81, 0x51, 0x1c, 0xe2, 0x04, 0x4d, 0xf4, 0x16, 0x4c, 0x8a,
+ 0x78, 0x18, 0xf1, 0x30, 0x9d, 0x89, 0x1d, 0xd7, 0x70, 0x02, 0x86, 0x53, 0xd8, 0x3c, 0x0b, 0x9f,
+ 0x73, 0xa7, 0x41, 0xc4, 0xd1, 0x77, 0xdd, 0xf5, 0xb6, 0xc3, 0xd2, 0x59, 0x76, 0x3e, 0x88, 0x2c,
+ 0x7c, 0x49, 0x28, 0xce, 0xa8, 0x81, 0xd6, 0x61, 0xb2, 0x15, 0x10, 0xd2, 0x64, 0xef, 0x24, 0x71,
+ 0x9f, 0x95, 0x79, 0xc4, 0x2b, 0xda, 0x93, 0x4a, 0x02, 0x76, 0x90, 0x51, 0x86, 0x53, 0x14, 0xd0,
+ 0x5d, 0x18, 0xf2, 0x77, 0x48, 0xb0, 0x45, 0x9c, 0x7a, 0xe9, 0x5c, 0x07, 0x77, 0x4a, 0x71, 0xb9,
+ 0xdd, 0x10, 0xb8, 0x09, 0x6b, 0x33, 0x59, 0xdc, 0xdd, 0xda, 0x4c, 0x36, 0x86, 0xfe, 0x23, 0x0b,
+ 0x4e, 0x49, 0xfd, 0x6d, 0xb5, 0x45, 0x47, 0x7d, 0xc1, 0xf7, 0xc2, 0x28, 0xe0, 0x31, 0x9a, 0x1e,
+ 0xcf, 0x8f, 0x5b, 0xb4, 0x9e, 0x53, 0x49, 0x69, 0x11, 0x4e, 0xe5, 0x61, 0x84, 0x38, 0xbf, 0x45,
+ 0xfa, 0xb2, 0x0f, 0x49, 0x24, 0x0f, 0xa3, 0xb9, 0x70, 0xf9, 0x9d, 0xc5, 0xb5, 0xd2, 0x13, 0x3c,
+ 0xc0, 0x14, 0xdd, 0x0c, 0xd5, 0x24, 0x10, 0xa7, 0xf1, 0xd1, 0x25, 0x28, 0xf8, 0x61, 0xe9, 0x49,
+ 0xb6, 0xb6, 0x4f, 0xe5, 0x8c, 0xe3, 0x8d, 0x2a, 0xb7, 0x3a, 0xbe, 0x51, 0xc5, 0x05, 0x3f, 0x94,
+ 0x99, 0xf0, 0xe8, 0x73, 0x36, 0x2c, 0x3d, 0xc5, 0x65, 0xce, 0x32, 0x13, 0x1e, 0x2b, 0xc4, 0x31,
+ 0x1c, 0x6d, 0xc1, 0x44, 0x68, 0x88, 0x0d, 0xc2, 0xd2, 0x79, 0x36, 0x52, 0x4f, 0xe5, 0x4d, 0x9a,
+ 0x81, 0xad, 0xa5, 0xa8, 0x32, 0xa9, 0xe0, 0x24, 0x59, 0xbe, 0xbb, 0x34, 0xc1, 0x45, 0x58, 0x7a,
+ 0xba, 0xcb, 0xee, 0xd2, 0x90, 0xf5, 0xdd, 0xa5, 0xd3, 0xc0, 0x09, 0x9a, 0xe8, 0xa6, 0xee, 0xab,
+ 0x7a, 0x21, 0xdf, 0x82, 0x35, 0xd3, 0x4b, 0x75, 0x2c, 0xd7, 0x43, 0xf5, 0x2d, 0x98, 0x94, 0x77,
+ 0x09, 0x5d, 0x99, 0x81, 0x5b, 0x27, 0xa5, 0x67, 0xe2, 0x4d, 0x7b, 0x35, 0x01, 0xc3, 0x29, 0xec,
+ 0x99, 0x9f, 0x80, 0xa9, 0x14, 0x1f, 0x77, 0x18, 0xd7, 0x9f, 0x99, 0x6d, 0x18, 0x33, 0xf6, 0xca,
+ 0x23, 0xb5, 0x0c, 0xfb, 0xa7, 0x00, 0xc3, 0xca, 0x62, 0x27, 0x47, 0x53, 0x37, 0xf5, 0x40, 0x9a,
+ 0xba, 0x8b, 0xa6, 0x61, 0xd9, 0xa9, 0xa4, 0x61, 0xd9, 0x50, 0xc5, 0xaf, 0x1b, 0xb6, 0x64, 0xeb,
+ 0x19, 0xc1, 0xa1, 0xf3, 0x4e, 0xf9, 0xde, 0x7d, 0x1d, 0x35, 0x65, 0x57, 0xb1, 0x67, 0x0b, 0xb5,
+ 0xbe, 0x8e, 0xfa, 0xb3, 0x2b, 0x30, 0xe5, 0xf9, 0xec, 0x21, 0x42, 0xea, 0x92, 0xcb, 0x64, 0xcc,
+ 0xe4, 0xb0, 0x1e, 0xbc, 0x30, 0x81, 0x80, 0xd3, 0x75, 0x68, 0x83, 0x9c, 0x1b, 0x4c, 0x2a, 0xec,
+ 0x38, 0xb3, 0x88, 0x05, 0x94, 0x3e, 0x80, 0xf9, 0xaf, 0xb0, 0x34, 0x99, 0xff, 0x00, 0xe6, 0x95,
+ 0x92, 0x1c, 0x67, 0x28, 0x39, 0x4e, 0xa6, 0x9f, 0x6a, 0xf9, 0xf5, 0x95, 0x8a, 0x78, 0xcb, 0x68,
+ 0x69, 0x1b, 0xea, 0x2b, 0x15, 0xcc, 0x61, 0x68, 0x0e, 0x06, 0xd8, 0x0f, 0x19, 0x14, 0x2a, 0xef,
+ 0x2c, 0x5a, 0xa9, 0x68, 0xe9, 0x86, 0x59, 0x05, 0x2c, 0x2a, 0x32, 0xfd, 0x03, 0x7d, 0x00, 0x32,
+ 0xfd, 0xc3, 0xe0, 0x03, 0xea, 0x1f, 0x24, 0x01, 0x1c, 0xd3, 0x42, 0xf7, 0xe0, 0xb8, 0xf1, 0xe8,
+ 0x56, 0xce, 0x9f, 0x90, 0x6f, 0x7f, 0x92, 0x40, 0x9e, 0x3f, 0x23, 0x3a, 0x7d, 0x7c, 0x25, 0x8b,
+ 0x12, 0xce, 0x6e, 0x00, 0x35, 0x60, 0xaa, 0x96, 0x6a, 0x75, 0xa8, 0xf7, 0x56, 0xd5, 0xba, 0x48,
+ 0xb7, 0x98, 0x26, 0x8c, 0x5e, 0x87, 0xa1, 0xf7, 0x7d, 0x6e, 0x2b, 0x2a, 0xde, 0x5f, 0x32, 0x74,
+ 0xd1, 0xd0, 0x3b, 0x37, 0xaa, 0xac, 0xfc, 0x60, 0xaf, 0x3c, 0x52, 0xf1, 0xeb, 0xf2, 0x2f, 0x56,
+ 0x15, 0xd0, 0x3f, 0xb2, 0x60, 0x26, 0xfd, 0xaa, 0x57, 0x9d, 0x1e, 0xeb, 0xbd, 0xd3, 0xb6, 0x68,
+ 0x74, 0x66, 0x29, 0x97, 0x1c, 0xee, 0xd0, 0x14, 0xfa, 0x24, 0xdd, 0x4f, 0xa1, 0x7b, 0x9f, 0x3b,
+ 0x7f, 0x68, 0xba, 0x7a, 0xcc, 0x4a, 0x0f, 0xf6, 0xca, 0x13, 0xfc, 0xf8, 0x77, 0xef, 0xab, 0x04,
+ 0x13, 0xbc, 0x02, 0xfa, 0x49, 0x38, 0x1e, 0xa4, 0xa5, 0xec, 0x44, 0xbe, 0x34, 0x9e, 0xed, 0xe5,
+ 0x2a, 0x49, 0x4e, 0x38, 0xce, 0x22, 0x88, 0xb3, 0xdb, 0xb1, 0x7f, 0xd3, 0x62, 0xda, 0x15, 0xd1,
+ 0x2d, 0x12, 0xb6, 0x1b, 0xd1, 0x11, 0xd8, 0x67, 0x2e, 0x19, 0xd6, 0x0d, 0x0f, 0x6c, 0x60, 0xf9,
+ 0xdf, 0x5a, 0xcc, 0xc0, 0xf2, 0x08, 0x5d, 0x45, 0xdf, 0x81, 0xa1, 0x48, 0xb4, 0x26, 0xba, 0x9e,
+ 0x67, 0x0c, 0x26, 0x3b, 0xc5, 0x8c, 0x4c, 0xd5, 0x4b, 0x4e, 0x96, 0x62, 0x45, 0xc6, 0xfe, 0xaf,
+ 0xf8, 0x0c, 0x48, 0xc8, 0x11, 0x28, 0x91, 0x17, 0x4d, 0x25, 0x72, 0xb9, 0xcb, 0x17, 0xe4, 0x28,
+ 0x93, 0xff, 0x4b, 0xb3, 0xdf, 0x4c, 0x82, 0xf9, 0x61, 0xb7, 0xec, 0xb5, 0xbf, 0x6a, 0x01, 0xc4,
+ 0x19, 0x7d, 0x7a, 0xc8, 0xcd, 0x7e, 0x99, 0xbe, 0xdd, 0xfc, 0xc8, 0xaf, 0xf9, 0x0d, 0xa1, 0xc4,
+ 0x3a, 0x1d, 0xeb, 0xb1, 0x79, 0xf9, 0x81, 0xf6, 0x1b, 0x2b, 0x6c, 0x54, 0x96, 0x21, 0xb6, 0x8b,
+ 0xb1, 0x65, 0x85, 0x11, 0x5e, 0xfb, 0x1b, 0x16, 0x1c, 0xcb, 0xf2, 0x3b, 0x42, 0xcf, 0xc3, 0x10,
+ 0x97, 0xe5, 0x2a, 0xab, 0x6b, 0x35, 0x9b, 0xb7, 0x44, 0x39, 0x56, 0x18, 0x3d, 0x27, 0xbd, 0x3f,
+ 0x5c, 0xb6, 0x99, 0x1b, 0x30, 0x56, 0x09, 0x88, 0xc6, 0x5f, 0xbc, 0x19, 0x27, 0xc2, 0x1a, 0x9e,
+ 0x7f, 0xfe, 0xd0, 0xc1, 0xbc, 0xec, 0x6f, 0x16, 0xe0, 0x18, 0xb7, 0x1d, 0x9c, 0xdb, 0xf1, 0xdd,
+ 0x7a, 0xc5, 0xaf, 0x0b, 0x6f, 0xf1, 0x77, 0x61, 0xb4, 0xa5, 0x09, 0xe0, 0x3b, 0x65, 0x4e, 0xd0,
+ 0x05, 0xf5, 0xb1, 0xc8, 0x50, 0x2f, 0xc5, 0x06, 0x2d, 0x54, 0x87, 0x51, 0xb2, 0xe3, 0xd6, 0x94,
+ 0x6d, 0x52, 0xe1, 0xd0, 0x97, 0xb4, 0x6a, 0x65, 0x49, 0xa3, 0x83, 0x0d, 0xaa, 0x3d, 0x5b, 0xfc,
+ 0x6b, 0x2c, 0x5a, 0x5f, 0x17, 0x7b, 0xa4, 0x9f, 0xb3, 0xe0, 0x64, 0x4e, 0x9e, 0x05, 0xda, 0xdc,
+ 0x5d, 0x66, 0xa5, 0x29, 0x96, 0xad, 0x6a, 0x8e, 0xdb, 0x6e, 0x62, 0x01, 0x45, 0x9f, 0x06, 0x68,
+ 0xc5, 0xd9, 0x69, 0xbb, 0x04, 0xa4, 0x37, 0x42, 0x53, 0x6b, 0x51, 0x86, 0x55, 0x12, 0x5b, 0x8d,
+ 0x96, 0xfd, 0x8d, 0x3e, 0xe8, 0x67, 0x66, 0x60, 0xa8, 0x02, 0x83, 0x5b, 0x3c, 0x08, 0x66, 0xc7,
+ 0x79, 0xa3, 0xb8, 0x32, 0xaa, 0x66, 0x3c, 0x6f, 0x5a, 0x29, 0x96, 0x64, 0xd0, 0x2a, 0x4c, 0xf3,
+ 0xcc, 0xbb, 0x8d, 0x45, 0xd2, 0x70, 0x76, 0xa5, 0x6c, 0xbb, 0xc0, 0x3e, 0x55, 0xc9, 0xf8, 0x57,
+ 0xd2, 0x28, 0x38, 0xab, 0x1e, 0x7a, 0x13, 0xc6, 0x23, 0xb7, 0x49, 0xfc, 0x76, 0x64, 0x5a, 0x3c,
+ 0xaa, 0xe7, 0xd7, 0xba, 0x01, 0xc5, 0x09, 0x6c, 0xf4, 0x3a, 0x8c, 0xb5, 0x52, 0x52, 0xfc, 0xfe,
+ 0x58, 0xdc, 0x65, 0x4a, 0xee, 0x4d, 0x5c, 0xe6, 0x7a, 0xd4, 0x66, 0x8e, 0x56, 0xeb, 0x5b, 0x01,
+ 0x09, 0xb7, 0xfc, 0x46, 0x9d, 0x71, 0xc0, 0xfd, 0x9a, 0xeb, 0x51, 0x02, 0x8e, 0x53, 0x35, 0x28,
+ 0x95, 0x0d, 0xc7, 0x6d, 0xb4, 0x03, 0x12, 0x53, 0x19, 0x30, 0xa9, 0x2c, 0x27, 0xe0, 0x38, 0x55,
+ 0xa3, 0xbb, 0x7a, 0x62, 0xf0, 0xe1, 0xa8, 0x27, 0xec, 0x7f, 0x5b, 0x00, 0x63, 0x6a, 0x7f, 0x8c,
+ 0x13, 0xe9, 0xbe, 0x01, 0x7d, 0x9b, 0x41, 0xab, 0x26, 0x4c, 0x1e, 0x33, 0xbf, 0xec, 0x0a, 0xae,
+ 0x2c, 0xe8, 0x5f, 0x46, 0xff, 0x63, 0x56, 0x8b, 0xee, 0xf1, 0xe3, 0xc2, 0x00, 0x58, 0xc6, 0xc9,
+ 0x55, 0x1e, 0x7e, 0x83, 0xf2, 0xc5, 0xdf, 0x21, 0xa2, 0xbc, 0x70, 0x53, 0x52, 0x26, 0xc4, 0x9a,
+ 0xca, 0x59, 0xbc, 0xf7, 0x25, 0x15, 0x74, 0x09, 0x46, 0x44, 0x6e, 0x53, 0xe6, 0x88, 0xc6, 0x37,
+ 0x13, 0xb3, 0x66, 0x5c, 0x8c, 0x8b, 0xb1, 0x8e, 0x63, 0xff, 0xe3, 0x02, 0x4c, 0x67, 0x78, 0x12,
+ 0xf3, 0x6b, 0x64, 0xd3, 0x0d, 0xa3, 0x60, 0x37, 0x79, 0x39, 0x61, 0x51, 0x8e, 0x15, 0x06, 0x3d,
+ 0xab, 0xf8, 0x45, 0x95, 0xbc, 0x9c, 0x84, 0xa7, 0x9e, 0x80, 0x1e, 0xee, 0x72, 0xa2, 0xd7, 0x76,
+ 0x3b, 0x24, 0x32, 0x79, 0x85, 0xba, 0xb6, 0x99, 0xe9, 0x03, 0x83, 0xd0, 0x27, 0xe0, 0xa6, 0xd2,
+ 0xe7, 0x6b, 0x4f, 0x40, 0xae, 0xd1, 0xe7, 0x30, 0xda, 0xb9, 0x88, 0x78, 0x8e, 0x17, 0x89, 0x87,
+ 0x62, 0x1c, 0xd4, 0x9c, 0x95, 0x62, 0x01, 0xb5, 0xbf, 0x5e, 0x84, 0x53, 0xb9, 0xb1, 0x05, 0x68,
+ 0xd7, 0x9b, 0xbe, 0xe7, 0x46, 0xbe, 0x32, 0x13, 0xe5, 0x81, 0xcc, 0x49, 0x6b, 0x6b, 0x55, 0x94,
+ 0x63, 0x85, 0x81, 0xce, 0x43, 0x3f, 0x93, 0xfc, 0x27, 0xf3, 0x1a, 0xe2, 0xf9, 0x45, 0x1e, 0xe6,
+ 0x95, 0x83, 0xb5, 0x5b, 0xbd, 0xd8, 0xf1, 0x56, 0x7f, 0x82, 0x72, 0x30, 0x7e, 0x23, 0x79, 0xa1,
+ 0xd0, 0xee, 0xfa, 0x7e, 0x03, 0x33, 0x20, 0x7a, 0x4a, 0x8c, 0x57, 0xc2, 0x2e, 0x12, 0x3b, 0x75,
+ 0x3f, 0xd4, 0x06, 0x8d, 0xdb, 0x60, 0x07, 0xae, 0xb7, 0x99, 0xb4, 0x97, 0xbd, 0xc6, 0x8b, 0xb1,
+ 0x84, 0xd3, 0xbd, 0x14, 0xa7, 0x67, 0x1f, 0xcc, 0xdf, 0x4b, 0x2a, 0x09, 0x7b, 0xd7, 0xcc, 0xec,
+ 0xfa, 0x0a, 0x18, 0xea, 0xca, 0x9e, 0xfc, 0x4c, 0x11, 0x26, 0xf0, 0xfc, 0xe2, 0x47, 0x13, 0x71,
+ 0x33, 0x3d, 0x11, 0x0f, 0x23, 0x4f, 0xfe, 0xe1, 0x66, 0xe3, 0xd7, 0x2c, 0x98, 0x60, 0x19, 0x56,
+ 0x45, 0x60, 0x20, 0xd7, 0xf7, 0x8e, 0xe0, 0x29, 0xf0, 0x04, 0xf4, 0x07, 0xb4, 0x51, 0x31, 0x83,
+ 0x6a, 0x8f, 0xb3, 0x9e, 0x60, 0x0e, 0x43, 0xa7, 0xa1, 0x8f, 0x75, 0x81, 0x4e, 0xde, 0x28, 0x3f,
+ 0x82, 0x17, 0x9d, 0xc8, 0xc1, 0xac, 0x94, 0x85, 0x28, 0xc5, 0xa4, 0xd5, 0x70, 0x79, 0xa7, 0x63,
+ 0xbb, 0x8c, 0x0f, 0x47, 0xd4, 0xa1, 0xcc, 0xae, 0x7d, 0xb0, 0x10, 0xa5, 0xd9, 0x24, 0x3b, 0x3f,
+ 0xb3, 0xff, 0xaa, 0x00, 0x67, 0x33, 0xeb, 0xf5, 0x1c, 0xa2, 0xb4, 0x73, 0xed, 0x47, 0x99, 0x8f,
+ 0xb1, 0x78, 0x84, 0xde, 0x08, 0x7d, 0xbd, 0x72, 0xff, 0xfd, 0x3d, 0x44, 0x0e, 0xcd, 0x1c, 0xb2,
+ 0x0f, 0x49, 0xe4, 0xd0, 0xcc, 0xbe, 0xe5, 0x88, 0x09, 0x7e, 0x50, 0xc8, 0xf9, 0x16, 0x26, 0x30,
+ 0xb8, 0x40, 0xcf, 0x19, 0x06, 0x0c, 0xe5, 0x23, 0x9c, 0x9f, 0x31, 0xbc, 0x0c, 0x2b, 0x28, 0x9a,
+ 0x83, 0x89, 0xa6, 0xeb, 0xd1, 0xc3, 0x67, 0xd7, 0x64, 0xc5, 0x95, 0xc2, 0x66, 0xd5, 0x04, 0xe3,
+ 0x24, 0x3e, 0x72, 0xb5, 0xa8, 0xa2, 0xfc, 0xeb, 0x5e, 0x3f, 0xd4, 0xae, 0x9b, 0x35, 0x6d, 0x56,
+ 0xd4, 0x28, 0x66, 0x44, 0x18, 0x5d, 0xd5, 0xe4, 0x44, 0xc5, 0xde, 0xe5, 0x44, 0xa3, 0xd9, 0x32,
+ 0xa2, 0x99, 0xd7, 0x61, 0xec, 0x81, 0xf5, 0x2c, 0xf6, 0x77, 0x8b, 0xf0, 0x58, 0x87, 0x6d, 0xcf,
+ 0xcf, 0x7a, 0x63, 0x0e, 0xb4, 0xb3, 0x3e, 0x35, 0x0f, 0x15, 0x38, 0xb6, 0xd1, 0x6e, 0x34, 0x76,
+ 0x99, 0x6f, 0x25, 0xa9, 0x4b, 0x0c, 0xc1, 0x53, 0x2a, 0xef, 0xab, 0xe5, 0x0c, 0x1c, 0x9c, 0x59,
+ 0x93, 0x3e, 0xb1, 0xe8, 0x4d, 0xb2, 0xab, 0x48, 0x25, 0x9e, 0x58, 0x58, 0x07, 0x62, 0x13, 0x17,
+ 0x5d, 0x81, 0x29, 0x67, 0xc7, 0x71, 0x79, 0x3e, 0x1b, 0x49, 0x80, 0xbf, 0xb1, 0x94, 0x2c, 0x7a,
+ 0x2e, 0x89, 0x80, 0xd3, 0x75, 0x72, 0x54, 0x42, 0xc5, 0x07, 0x52, 0x09, 0x99, 0xf1, 0x2d, 0x07,
+ 0xf2, 0xe3, 0x5b, 0x76, 0x3e, 0x17, 0xbb, 0xa6, 0x02, 0x7d, 0x0f, 0xc6, 0x0e, 0x6b, 0x7b, 0xfe,
+ 0x0c, 0x0c, 0x06, 0x3c, 0xd7, 0x7c, 0xd2, 0x65, 0x50, 0xa6, 0xa0, 0x97, 0x70, 0xfb, 0x7f, 0xb5,
+ 0x40, 0xc9, 0x92, 0xcd, 0x50, 0xf6, 0xaf, 0x33, 0x43, 0x7a, 0x2e, 0x05, 0xd7, 0xdc, 0x15, 0x8f,
+ 0x6b, 0x86, 0xf4, 0x31, 0x10, 0x9b, 0xb8, 0x7c, 0xb9, 0x85, 0x71, 0xd0, 0x14, 0xe3, 0x01, 0x21,
+ 0x74, 0x98, 0x0a, 0x03, 0x7d, 0x06, 0x06, 0xeb, 0xee, 0x8e, 0x1b, 0x0a, 0x39, 0xda, 0xa1, 0x75,
+ 0x80, 0xf1, 0xf7, 0x2d, 0x72, 0x32, 0x58, 0xd2, 0xb3, 0xff, 0x89, 0x05, 0x4a, 0xf9, 0x7a, 0x95,
+ 0x38, 0x8d, 0x68, 0x0b, 0xbd, 0x05, 0x20, 0x29, 0x28, 0xd9, 0x9b, 0x34, 0x09, 0x03, 0xac, 0x20,
+ 0x07, 0xc6, 0x3f, 0xac, 0xd5, 0x41, 0x6f, 0xc2, 0xc0, 0x16, 0xa3, 0x25, 0xbe, 0xed, 0xbc, 0x52,
+ 0x75, 0xb1, 0xd2, 0x83, 0xbd, 0xf2, 0x31, 0xb3, 0x4d, 0x79, 0x8b, 0xf1, 0x5a, 0xf6, 0xcf, 0x14,
+ 0xe2, 0x39, 0x7d, 0xa7, 0xed, 0x47, 0xce, 0x11, 0x70, 0x22, 0x57, 0x0c, 0x4e, 0xe4, 0xa9, 0x4e,
+ 0xda, 0x65, 0xd6, 0xa5, 0x5c, 0x0e, 0xe4, 0x46, 0x82, 0x03, 0x79, 0xba, 0x3b, 0xa9, 0xce, 0x9c,
+ 0xc7, 0x7f, 0x6d, 0xc1, 0x94, 0x81, 0x7f, 0x04, 0x17, 0xe0, 0xb2, 0x79, 0x01, 0x3e, 0xde, 0xf5,
+ 0x1b, 0x72, 0x2e, 0xbe, 0x9f, 0x2e, 0x26, 0xfa, 0xce, 0x2e, 0xbc, 0xf7, 0xa1, 0x6f, 0xcb, 0x09,
+ 0xea, 0xe2, 0x5d, 0x7f, 0xb1, 0xa7, 0xb1, 0x9e, 0xbd, 0xea, 0x04, 0xc2, 0x9c, 0xe4, 0x79, 0x39,
+ 0xea, 0xb4, 0xa8, 0xab, 0x29, 0x09, 0x6b, 0x0a, 0x5d, 0x86, 0x81, 0xb0, 0xe6, 0xb7, 0x94, 0x57,
+ 0x22, 0xcb, 0x8f, 0x5f, 0x65, 0x25, 0x07, 0x7b, 0x65, 0x64, 0x36, 0x47, 0x8b, 0xb1, 0xc0, 0x47,
+ 0xef, 0xc2, 0x18, 0xfb, 0xa5, 0x6c, 0x3b, 0x8b, 0xf9, 0x12, 0x98, 0xaa, 0x8e, 0xc8, 0x0d, 0x9f,
+ 0x8d, 0x22, 0x6c, 0x92, 0x9a, 0xd9, 0x84, 0x61, 0xf5, 0x59, 0x8f, 0x54, 0xf3, 0xff, 0xc7, 0x45,
+ 0x98, 0xce, 0x58, 0x73, 0x28, 0x34, 0x66, 0xe2, 0x52, 0x8f, 0x4b, 0xf5, 0x03, 0xce, 0x45, 0xc8,
+ 0x1e, 0x80, 0x75, 0xb1, 0xb6, 0x7a, 0x6e, 0xf4, 0x66, 0x48, 0x92, 0x8d, 0xd2, 0xa2, 0xee, 0x8d,
+ 0xd2, 0xc6, 0x8e, 0x6c, 0xa8, 0x69, 0x43, 0xaa, 0xa7, 0x8f, 0x74, 0x4e, 0x7f, 0xa7, 0x0f, 0x8e,
+ 0x65, 0x19, 0xbc, 0xa0, 0x2f, 0xc1, 0x00, 0x73, 0x9b, 0x93, 0x82, 0xb3, 0x97, 0x7b, 0x35, 0x95,
+ 0x99, 0x65, 0x9e, 0x77, 0x22, 0x1a, 0xf2, 0xac, 0x3c, 0x8e, 0x78, 0x61, 0xd7, 0x61, 0x16, 0x6d,
+ 0xb2, 0x28, 0x65, 0xe2, 0xf6, 0x94, 0xc7, 0xc7, 0x27, 0x7a, 0xee, 0x80, 0xb8, 0x7f, 0xc3, 0x84,
+ 0xdd, 0x98, 0x2c, 0xee, 0x6e, 0x37, 0x26, 0x5b, 0x46, 0x2b, 0x30, 0x50, 0xe3, 0x06, 0x49, 0xc5,
+ 0xee, 0x47, 0x18, 0xb7, 0x46, 0x52, 0x07, 0xb0, 0xb0, 0x42, 0x12, 0x04, 0x66, 0x5c, 0x18, 0xd1,
+ 0x06, 0xe6, 0x91, 0x2e, 0x9e, 0x6d, 0x7a, 0xf1, 0x69, 0x43, 0xf0, 0x48, 0x17, 0xd0, 0x3f, 0xd7,
+ 0xee, 0x7e, 0x71, 0x1e, 0x7c, 0xdc, 0xe0, 0x9d, 0x4e, 0x27, 0x9c, 0x19, 0x13, 0xfb, 0x8a, 0xf1,
+ 0x52, 0x55, 0x33, 0x8d, 0x40, 0x6e, 0x2e, 0x34, 0xf3, 0xc2, 0xef, 0x9c, 0x3a, 0xc0, 0xfe, 0x39,
+ 0x0b, 0x12, 0xee, 0x66, 0x4a, 0xdc, 0x69, 0xe5, 0x8a, 0x3b, 0xcf, 0x41, 0x5f, 0xe0, 0x37, 0x24,
+ 0x3f, 0xa5, 0x30, 0xb0, 0xdf, 0x20, 0x98, 0x41, 0x28, 0x46, 0x14, 0x0b, 0xb1, 0x46, 0xf5, 0x07,
+ 0xba, 0x78, 0x7a, 0x3f, 0x01, 0xfd, 0x0d, 0xb2, 0x43, 0x1a, 0xc9, 0x94, 0xc0, 0xd7, 0x69, 0x21,
+ 0xe6, 0x30, 0xfb, 0xd7, 0xfa, 0xe0, 0x4c, 0xc7, 0x60, 0x86, 0x94, 0xc1, 0xdc, 0x74, 0x22, 0x72,
+ 0xd7, 0xd9, 0x4d, 0xa6, 0xc2, 0xbc, 0xc2, 0x8b, 0xb1, 0x84, 0x33, 0xd7, 0x6f, 0x9e, 0xde, 0x29,
+ 0x21, 0x1c, 0x16, 0x59, 0x9d, 0x04, 0xd4, 0x14, 0x36, 0x16, 0x1f, 0x86, 0xb0, 0xf1, 0x45, 0x80,
+ 0x30, 0x6c, 0x70, 0xab, 0xd2, 0xba, 0xf0, 0x29, 0x8f, 0x83, 0x6d, 0x54, 0xaf, 0x0b, 0x08, 0xd6,
+ 0xb0, 0xd0, 0x22, 0x4c, 0xb6, 0x02, 0x3f, 0xe2, 0xb2, 0xf6, 0x45, 0x6e, 0x78, 0xdd, 0x6f, 0xc6,
+ 0x91, 0xab, 0x24, 0xe0, 0x38, 0x55, 0x03, 0xbd, 0x02, 0x23, 0x22, 0xb6, 0x5c, 0xc5, 0xf7, 0x1b,
+ 0x42, 0xbc, 0xa7, 0x6c, 0x91, 0xab, 0x31, 0x08, 0xeb, 0x78, 0x5a, 0x35, 0x26, 0xc0, 0x1f, 0xcc,
+ 0xac, 0xc6, 0x85, 0xf8, 0x1a, 0x5e, 0x22, 0x0f, 0xc5, 0x50, 0x4f, 0x79, 0x28, 0x62, 0x81, 0xe7,
+ 0x70, 0xcf, 0xfa, 0x64, 0xe8, 0x2a, 0x22, 0xfc, 0x56, 0x1f, 0x4c, 0x8b, 0x85, 0xf3, 0xa8, 0x97,
+ 0xcb, 0xcd, 0xf4, 0x72, 0x79, 0x18, 0x22, 0xd1, 0x8f, 0xd6, 0xcc, 0x51, 0xaf, 0x99, 0x9f, 0xb5,
+ 0xc0, 0xe4, 0x21, 0xd1, 0x7f, 0x90, 0x9b, 0x4b, 0xf8, 0x95, 0x5c, 0x9e, 0x34, 0x0e, 0x52, 0xff,
+ 0xc1, 0xb2, 0x0a, 0xdb, 0xff, 0xb3, 0x05, 0x8f, 0x77, 0xa5, 0x88, 0x96, 0x60, 0x98, 0x31, 0xba,
+ 0xda, 0xbb, 0xf8, 0x69, 0xe5, 0x98, 0x21, 0x01, 0x39, 0x7c, 0x77, 0x5c, 0x13, 0x2d, 0xa5, 0x92,
+ 0x36, 0x3f, 0x93, 0x91, 0xb4, 0xf9, 0xb8, 0x31, 0x3c, 0x0f, 0x98, 0xb5, 0xf9, 0x6b, 0xf4, 0xc6,
+ 0x31, 0xbd, 0x3b, 0x3f, 0x61, 0x88, 0x73, 0xed, 0x84, 0x38, 0x17, 0x99, 0xd8, 0xda, 0x1d, 0xf2,
+ 0x16, 0x4c, 0xb2, 0xa0, 0xb3, 0xcc, 0x4d, 0x48, 0xb8, 0x85, 0x16, 0x62, 0xab, 0xe2, 0xeb, 0x09,
+ 0x18, 0x4e, 0x61, 0xdb, 0x7f, 0x59, 0x84, 0x01, 0xbe, 0xfd, 0x8e, 0xe0, 0xe1, 0xfb, 0x1c, 0x0c,
+ 0xbb, 0xcd, 0x66, 0x9b, 0xe7, 0xe1, 0xed, 0x8f, 0x0d, 0xcb, 0x57, 0x64, 0x21, 0x8e, 0xe1, 0x68,
+ 0x59, 0x68, 0x12, 0x3a, 0xc4, 0xb5, 0xe7, 0x1d, 0x9f, 0x5d, 0x74, 0x22, 0x87, 0x73, 0x71, 0xea,
+ 0x9e, 0x8d, 0x75, 0x0e, 0xe8, 0xf3, 0x00, 0x61, 0x14, 0xb8, 0xde, 0x26, 0x2d, 0x13, 0xc9, 0x4f,
+ 0x9e, 0xed, 0x40, 0xad, 0xaa, 0x90, 0x39, 0xcd, 0xf8, 0xcc, 0x51, 0x00, 0xac, 0x51, 0x44, 0xb3,
+ 0xc6, 0x4d, 0x3f, 0x93, 0x98, 0x3b, 0xe0, 0x54, 0xe3, 0x39, 0x9b, 0x79, 0x15, 0x86, 0x15, 0xf1,
+ 0x6e, 0x72, 0xc5, 0x51, 0x9d, 0x61, 0xfb, 0x14, 0x4c, 0x24, 0xfa, 0x76, 0x28, 0xb1, 0xe4, 0xaf,
+ 0x5b, 0x30, 0xc1, 0x3b, 0xb3, 0xe4, 0xed, 0x88, 0xdb, 0xe0, 0x3e, 0x1c, 0x6b, 0x64, 0x9c, 0xca,
+ 0x62, 0xfa, 0x7b, 0x3f, 0xc5, 0x95, 0x18, 0x32, 0x0b, 0x8a, 0x33, 0xdb, 0x40, 0x17, 0xe8, 0x8e,
+ 0xa3, 0xa7, 0xae, 0xd3, 0x10, 0xb1, 0x4d, 0x46, 0xf9, 0x6e, 0xe3, 0x65, 0x58, 0x41, 0xed, 0x3f,
+ 0xb5, 0x60, 0x8a, 0xf7, 0xfc, 0x1a, 0xd9, 0x55, 0x67, 0xd3, 0x0f, 0xb3, 0xef, 0x22, 0x03, 0x7c,
+ 0x21, 0x27, 0x03, 0xbc, 0xfe, 0x69, 0xc5, 0x8e, 0x9f, 0xf6, 0x4d, 0x0b, 0xc4, 0x0a, 0x39, 0x02,
+ 0x49, 0xcb, 0x4f, 0x98, 0x92, 0x96, 0x99, 0xfc, 0x4d, 0x90, 0x23, 0x62, 0xf9, 0x3b, 0x0b, 0x26,
+ 0x39, 0x82, 0x16, 0x48, 0xed, 0x87, 0x39, 0x0f, 0xf3, 0xe6, 0x17, 0x65, 0x9a, 0xb5, 0x5e, 0x23,
+ 0xbb, 0xeb, 0x7e, 0xc5, 0x89, 0xb6, 0xb2, 0x3f, 0xca, 0x98, 0xac, 0xbe, 0x8e, 0x93, 0x55, 0x97,
+ 0x1b, 0xc8, 0xc8, 0xf5, 0xd9, 0x45, 0x00, 0x7c, 0xd8, 0x5c, 0x9f, 0xf6, 0xf7, 0x2d, 0x40, 0xbc,
+ 0x19, 0x83, 0x71, 0xa3, 0xec, 0x10, 0x2b, 0xcd, 0x8c, 0x57, 0xa7, 0x20, 0x58, 0xc3, 0x7a, 0x28,
+ 0xc3, 0x93, 0x30, 0x65, 0x29, 0x76, 0x37, 0x65, 0x39, 0xc4, 0x88, 0x7e, 0x73, 0x10, 0x92, 0x8e,
+ 0xa1, 0xe8, 0x16, 0x8c, 0xd6, 0x9c, 0x96, 0x73, 0xc7, 0x6d, 0xb8, 0x91, 0x4b, 0xc2, 0x4e, 0x76,
+ 0x6e, 0x0b, 0x1a, 0x9e, 0x30, 0x3e, 0xd0, 0x4a, 0xb0, 0x41, 0x07, 0xcd, 0x02, 0xb4, 0x02, 0x77,
+ 0xc7, 0x6d, 0x90, 0x4d, 0x26, 0x10, 0x62, 0xd1, 0x94, 0xb8, 0xd1, 0x9d, 0x2c, 0xc5, 0x1a, 0x46,
+ 0x46, 0x10, 0x93, 0xe2, 0x23, 0x0e, 0x62, 0x02, 0x47, 0x16, 0xc4, 0xa4, 0xef, 0x50, 0x41, 0x4c,
+ 0x86, 0x0e, 0x1d, 0xc4, 0xa4, 0xbf, 0xa7, 0x20, 0x26, 0x18, 0x4e, 0x48, 0xde, 0x93, 0xfe, 0x5f,
+ 0x76, 0x1b, 0x44, 0x3c, 0x38, 0x78, 0x08, 0xa8, 0x99, 0xfd, 0xbd, 0xf2, 0x09, 0x9c, 0x89, 0x81,
+ 0x73, 0x6a, 0xa2, 0x4f, 0x43, 0xc9, 0x69, 0x34, 0xfc, 0xbb, 0x6a, 0x52, 0x97, 0xc2, 0x9a, 0xd3,
+ 0x88, 0x23, 0x03, 0x0e, 0xcd, 0x9f, 0xde, 0xdf, 0x2b, 0x97, 0xe6, 0x72, 0x70, 0x70, 0x6e, 0x6d,
+ 0xf4, 0x06, 0x0c, 0xb7, 0x02, 0xbf, 0xb6, 0xaa, 0x79, 0xaf, 0x9f, 0xa5, 0x03, 0x58, 0x91, 0x85,
+ 0x07, 0x7b, 0xe5, 0x31, 0xf5, 0x87, 0x5d, 0xf8, 0x71, 0x85, 0x8c, 0xf8, 0x20, 0x23, 0x8f, 0x3a,
+ 0x3e, 0xc8, 0xe8, 0x43, 0x8e, 0x0f, 0x62, 0x6f, 0xc3, 0x74, 0x95, 0x04, 0xae, 0xd3, 0x70, 0xef,
+ 0x53, 0x9e, 0x5c, 0x9e, 0x81, 0xeb, 0x30, 0x1c, 0x24, 0x4e, 0xfd, 0x9e, 0xe2, 0xd9, 0x6b, 0x72,
+ 0x19, 0x79, 0xca, 0xc7, 0x84, 0xec, 0xff, 0xd7, 0x82, 0x41, 0xe1, 0x6c, 0x7a, 0x04, 0x9c, 0xe9,
+ 0x9c, 0xa1, 0x92, 0x29, 0x67, 0x4f, 0x0a, 0xeb, 0x4c, 0xae, 0x32, 0x66, 0x25, 0xa1, 0x8c, 0x79,
+ 0xbc, 0x13, 0x91, 0xce, 0x6a, 0x98, 0xff, 0xa4, 0x48, 0x5f, 0x08, 0x46, 0xd8, 0x83, 0x47, 0x3f,
+ 0x04, 0x6b, 0x30, 0x18, 0x0a, 0xb7, 0xfb, 0x42, 0xbe, 0x2f, 0x4f, 0x72, 0x12, 0x63, 0x1b, 0x48,
+ 0xe1, 0x68, 0x2f, 0x89, 0x64, 0xfa, 0xf3, 0x17, 0x1f, 0xa1, 0x3f, 0x7f, 0xb7, 0xc0, 0x10, 0x7d,
+ 0x0f, 0x23, 0x30, 0x84, 0xfd, 0x6d, 0x76, 0x3b, 0xeb, 0xe5, 0x47, 0xc0, 0xb8, 0x5d, 0x31, 0xef,
+ 0x71, 0xbb, 0xc3, 0xca, 0x12, 0x9d, 0xca, 0x61, 0xe0, 0x7e, 0xd5, 0x82, 0x33, 0x19, 0x5f, 0xa5,
+ 0x71, 0x73, 0xcf, 0xc3, 0x90, 0xd3, 0xae, 0xbb, 0x6a, 0x2f, 0x6b, 0xda, 0xe2, 0x39, 0x51, 0x8e,
+ 0x15, 0x06, 0x5a, 0x80, 0x29, 0x92, 0x0a, 0x71, 0xcb, 0x23, 0x64, 0x31, 0x0f, 0xe5, 0x74, 0x7c,
+ 0xdb, 0x34, 0xbe, 0x0a, 0x2e, 0x57, 0xcc, 0x0d, 0x2e, 0xf7, 0x4b, 0x16, 0x8c, 0x28, 0xc7, 0xf3,
+ 0x47, 0x3e, 0xda, 0x6f, 0x99, 0xa3, 0xfd, 0x58, 0x87, 0xd1, 0xce, 0x19, 0xe6, 0x3f, 0x29, 0xa8,
+ 0xfe, 0x56, 0xfc, 0x20, 0xea, 0x81, 0x4b, 0x7c, 0x70, 0xb7, 0x97, 0x4b, 0x30, 0xe2, 0xb4, 0x5a,
+ 0x12, 0x20, 0xed, 0x17, 0x59, 0x76, 0x92, 0xb8, 0x18, 0xeb, 0x38, 0xca, 0x0b, 0xa7, 0x98, 0xeb,
+ 0x85, 0x53, 0x07, 0x88, 0x9c, 0x60, 0x93, 0x44, 0xb4, 0x4c, 0x98, 0x5b, 0xe7, 0x9f, 0x37, 0xed,
+ 0xc8, 0x6d, 0xcc, 0xba, 0x5e, 0x14, 0x46, 0xc1, 0xec, 0x8a, 0x17, 0xdd, 0x08, 0xf8, 0x33, 0x55,
+ 0x0b, 0xe1, 0xa8, 0x68, 0x61, 0x8d, 0xae, 0x0c, 0xb2, 0xc2, 0xda, 0xe8, 0x37, 0x0d, 0x61, 0xd6,
+ 0x44, 0x39, 0x56, 0x18, 0xf6, 0xab, 0xec, 0xf6, 0x61, 0x63, 0x7a, 0xb8, 0xd0, 0x84, 0x7f, 0x35,
+ 0xaa, 0x66, 0x83, 0xa9, 0x84, 0x17, 0xf5, 0x00, 0x88, 0x9d, 0x0f, 0x7b, 0xda, 0xb0, 0xee, 0xcf,
+ 0x1a, 0x47, 0x49, 0x44, 0x9f, 0x4d, 0x19, 0x37, 0xbd, 0xd0, 0xe5, 0xd6, 0x38, 0x84, 0x39, 0x13,
+ 0x4b, 0x55, 0xc8, 0x12, 0xb9, 0xad, 0x54, 0xc4, 0xbe, 0xd0, 0x52, 0x15, 0x0a, 0x00, 0x8e, 0x71,
+ 0x28, 0xc3, 0xa6, 0xfe, 0x84, 0x25, 0x14, 0x47, 0xb4, 0x57, 0xd8, 0x21, 0xd6, 0x30, 0xd0, 0x45,
+ 0x21, 0xb4, 0xe0, 0xba, 0x87, 0xc7, 0x12, 0x42, 0x0b, 0x39, 0x5c, 0x9a, 0xa4, 0xe9, 0x12, 0x8c,
+ 0x90, 0x7b, 0x11, 0x09, 0x3c, 0xa7, 0x41, 0x5b, 0xe8, 0x8f, 0xe3, 0xf3, 0x2e, 0xc5, 0xc5, 0x58,
+ 0xc7, 0x41, 0xeb, 0x30, 0x11, 0x72, 0x59, 0x9e, 0xca, 0xa3, 0xc2, 0x65, 0xa2, 0xcf, 0x2a, 0x97,
+ 0x7f, 0x13, 0x7c, 0xc0, 0x8a, 0xf8, 0xe9, 0x24, 0x03, 0xa1, 0x24, 0x49, 0xa0, 0x37, 0x61, 0xbc,
+ 0xe1, 0x3b, 0xf5, 0x79, 0xa7, 0xe1, 0x78, 0x35, 0x36, 0x3e, 0x43, 0x46, 0x34, 0xcc, 0xf1, 0xeb,
+ 0x06, 0x14, 0x27, 0xb0, 0x29, 0x83, 0xa8, 0x97, 0x88, 0xdc, 0x3f, 0x8e, 0xb7, 0x49, 0xc2, 0xd2,
+ 0x30, 0xfb, 0x2a, 0xc6, 0x20, 0x5e, 0xcf, 0xc1, 0xc1, 0xb9, 0xb5, 0xd1, 0x65, 0x18, 0x95, 0x9f,
+ 0xaf, 0xc5, 0x0d, 0x8a, 0x1d, 0x9a, 0x34, 0x18, 0x36, 0x30, 0x51, 0x08, 0xc7, 0xe5, 0xff, 0xf5,
+ 0xc0, 0xd9, 0xd8, 0x70, 0x6b, 0x22, 0x98, 0x06, 0x77, 0xfe, 0xfe, 0x94, 0xf4, 0x34, 0x5d, 0xca,
+ 0x42, 0x3a, 0xd8, 0x2b, 0x9f, 0x16, 0xa3, 0x96, 0x09, 0xc7, 0xd9, 0xb4, 0xd1, 0x2a, 0x4c, 0x73,
+ 0x1b, 0x98, 0x85, 0x2d, 0x52, 0xdb, 0x96, 0x1b, 0x8e, 0x71, 0x8d, 0x9a, 0xe3, 0xcf, 0xd5, 0x34,
+ 0x0a, 0xce, 0xaa, 0x87, 0xde, 0x83, 0x52, 0xab, 0x7d, 0xa7, 0xe1, 0x86, 0x5b, 0x6b, 0x7e, 0xc4,
+ 0x4c, 0xc8, 0xe6, 0xea, 0xf5, 0x80, 0x84, 0xdc, 0x37, 0x98, 0x5d, 0xbd, 0x32, 0xd6, 0x53, 0x25,
+ 0x07, 0x0f, 0xe7, 0x52, 0x40, 0xf7, 0xe1, 0x78, 0x62, 0x21, 0x88, 0xa0, 0x2d, 0xe3, 0xf9, 0x59,
+ 0xd4, 0xaa, 0x59, 0x15, 0x44, 0xfc, 0xa3, 0x2c, 0x10, 0xce, 0x6e, 0x02, 0xbd, 0x06, 0xe0, 0xb6,
+ 0x96, 0x9d, 0xa6, 0xdb, 0xa0, 0xcf, 0xd1, 0x69, 0xb6, 0x46, 0xe8, 0xd3, 0x04, 0x56, 0x2a, 0xb2,
+ 0x94, 0x9e, 0xcd, 0xe2, 0xdf, 0x2e, 0xd6, 0xb0, 0xd1, 0x75, 0x18, 0x17, 0xff, 0x76, 0xc5, 0x94,
+ 0x4e, 0xa9, 0x84, 0xbb, 0xe3, 0xb2, 0x86, 0x9a, 0xc7, 0x44, 0x09, 0x4e, 0xd4, 0x45, 0x9b, 0x70,
+ 0x46, 0x66, 0xfb, 0xd5, 0xd7, 0xa7, 0x9c, 0x83, 0x90, 0xa5, 0x2e, 0x1b, 0xe2, 0x3e, 0x45, 0x73,
+ 0x9d, 0x10, 0x71, 0x67, 0x3a, 0xf4, 0x5e, 0xd7, 0x97, 0x39, 0xf7, 0x18, 0x3f, 0x1e, 0xc7, 0x14,
+ 0xbd, 0x9e, 0x04, 0xe2, 0x34, 0x3e, 0xf2, 0xe1, 0xb8, 0xeb, 0x65, 0xad, 0xea, 0x13, 0x8c, 0xd0,
+ 0x27, 0xb9, 0xb3, 0x7c, 0xe7, 0x15, 0x9d, 0x09, 0xc7, 0xd9, 0x74, 0xd1, 0x0a, 0x4c, 0x47, 0xbc,
+ 0x60, 0xd1, 0x0d, 0x79, 0x66, 0x24, 0xfa, 0xec, 0x3b, 0xc9, 0x9a, 0x3b, 0x49, 0x57, 0xf3, 0x7a,
+ 0x1a, 0x8c, 0xb3, 0xea, 0x7c, 0x30, 0x03, 0xd0, 0xef, 0x58, 0xb4, 0xb6, 0xc6, 0xe8, 0xa3, 0x2f,
+ 0xc0, 0xa8, 0x3e, 0x3e, 0x82, 0x69, 0x39, 0x9f, 0xcd, 0x07, 0x6b, 0xc7, 0x0b, 0x7f, 0x26, 0xa8,
+ 0x23, 0x44, 0x87, 0x61, 0x83, 0x22, 0xaa, 0x65, 0x04, 0xb9, 0xb8, 0xd8, 0x1b, 0x53, 0xd4, 0xbb,
+ 0xfd, 0x23, 0x81, 0xec, 0x9d, 0x83, 0xae, 0xc3, 0x50, 0xad, 0xe1, 0x12, 0x2f, 0x5a, 0xa9, 0x74,
+ 0x0a, 0xf5, 0xba, 0x20, 0x70, 0xc4, 0x56, 0x14, 0x09, 0xcd, 0x78, 0x19, 0x56, 0x14, 0xec, 0xcb,
+ 0x30, 0x52, 0x6d, 0x10, 0xd2, 0xe2, 0x7e, 0x5c, 0xe8, 0x19, 0xf6, 0x30, 0x61, 0xac, 0xa5, 0xc5,
+ 0x58, 0x4b, 0xfd, 0xcd, 0xc1, 0x98, 0x4a, 0x09, 0xb7, 0x7f, 0xaf, 0x00, 0xe5, 0x2e, 0x79, 0xf5,
+ 0x12, 0xfa, 0x36, 0xab, 0x27, 0x7d, 0xdb, 0x1c, 0x4c, 0xc4, 0xff, 0x74, 0x51, 0x9e, 0x32, 0x86,
+ 0xbe, 0x65, 0x82, 0x71, 0x12, 0xbf, 0x67, 0xbf, 0x16, 0x5d, 0x65, 0xd7, 0xd7, 0xd5, 0x33, 0xcb,
+ 0x50, 0xd5, 0xf7, 0xf7, 0xfe, 0xf6, 0xce, 0x55, 0xbb, 0xda, 0xdf, 0x2e, 0xc0, 0x71, 0x35, 0x84,
+ 0x3f, 0xbe, 0x03, 0x77, 0x33, 0x3d, 0x70, 0x0f, 0x41, 0x69, 0x6d, 0xdf, 0x80, 0x01, 0x1e, 0x7f,
+ 0xb6, 0x07, 0x9e, 0xff, 0x09, 0x33, 0x15, 0x80, 0x62, 0x33, 0x8d, 0x74, 0x00, 0xff, 0xc8, 0x82,
+ 0x89, 0x84, 0x83, 0x24, 0xc2, 0x9a, 0x17, 0xfd, 0x83, 0xf0, 0xe5, 0x59, 0x1c, 0xff, 0x39, 0xe8,
+ 0xdb, 0xf2, 0x95, 0x91, 0xb2, 0xc2, 0xb8, 0xea, 0x87, 0x11, 0x66, 0x10, 0xfb, 0xcf, 0x2c, 0xe8,
+ 0x5f, 0x77, 0x5c, 0x2f, 0x92, 0xda, 0x0f, 0x2b, 0x47, 0xfb, 0xd1, 0xcb, 0x77, 0xa1, 0x57, 0x60,
+ 0x80, 0x6c, 0x6c, 0x90, 0x5a, 0x24, 0x66, 0x55, 0x46, 0xd3, 0x18, 0x58, 0x62, 0xa5, 0x94, 0x09,
+ 0x65, 0x8d, 0xf1, 0xbf, 0x58, 0x20, 0xa3, 0xdb, 0x30, 0x1c, 0xb9, 0x4d, 0x32, 0x57, 0xaf, 0x0b,
+ 0x9b, 0x80, 0x07, 0x08, 0x01, 0xb3, 0x2e, 0x09, 0xe0, 0x98, 0x96, 0xfd, 0xf5, 0x02, 0x40, 0x1c,
+ 0xef, 0xae, 0xdb, 0x27, 0xce, 0xa7, 0xb4, 0xc5, 0xe7, 0x33, 0xb4, 0xc5, 0x28, 0x26, 0x98, 0xa1,
+ 0x2a, 0x56, 0xc3, 0x54, 0xec, 0x69, 0x98, 0xfa, 0x0e, 0x33, 0x4c, 0x0b, 0x30, 0x15, 0xc7, 0xeb,
+ 0x33, 0xc3, 0x95, 0xb2, 0xfb, 0x7b, 0x3d, 0x09, 0xc4, 0x69, 0x7c, 0x9b, 0xc0, 0x39, 0x15, 0xb6,
+ 0x4c, 0xdc, 0x85, 0xcc, 0x95, 0x40, 0xd7, 0xbe, 0x77, 0x19, 0xa7, 0x58, 0x1d, 0x5e, 0xc8, 0x55,
+ 0x87, 0xff, 0x2b, 0x0b, 0x8e, 0x25, 0xdb, 0x61, 0x7e, 0xf7, 0x5f, 0xb5, 0xe0, 0x78, 0x9c, 0x56,
+ 0x2a, 0x6d, 0x82, 0xf0, 0x72, 0xc7, 0x50, 0x6c, 0x39, 0x3d, 0x8e, 0xc3, 0xb6, 0xac, 0x66, 0x91,
+ 0xc6, 0xd9, 0x2d, 0xda, 0xff, 0x4f, 0x1f, 0x94, 0xf2, 0x62, 0xb8, 0x31, 0x4f, 0x23, 0xe7, 0x5e,
+ 0x75, 0x9b, 0xdc, 0x15, 0xfe, 0x1c, 0xb1, 0xa7, 0x11, 0x2f, 0xc6, 0x12, 0x9e, 0xcc, 0x24, 0x56,
+ 0xe8, 0x31, 0x93, 0xd8, 0x16, 0x4c, 0xdd, 0xdd, 0x22, 0xde, 0x4d, 0x2f, 0x74, 0x22, 0x37, 0xdc,
+ 0x70, 0x99, 0x02, 0x9d, 0xaf, 0x9b, 0xd7, 0xa4, 0xd7, 0xc5, 0xed, 0x24, 0xc2, 0xc1, 0x5e, 0xf9,
+ 0x8c, 0x51, 0x10, 0x77, 0x99, 0x1f, 0x24, 0x38, 0x4d, 0x34, 0x9d, 0x88, 0xad, 0xef, 0x11, 0x27,
+ 0x62, 0x6b, 0xba, 0xc2, 0xec, 0x46, 0xba, 0x91, 0xb0, 0x67, 0xeb, 0xaa, 0x2a, 0xc5, 0x1a, 0x06,
+ 0xfa, 0x1c, 0x20, 0x3d, 0x93, 0xa6, 0x11, 0x42, 0xf7, 0x85, 0xfd, 0xbd, 0x32, 0x5a, 0x4b, 0x41,
+ 0x0f, 0xf6, 0xca, 0xd3, 0xb4, 0x74, 0xc5, 0xa3, 0xcf, 0xdf, 0x38, 0xee, 0x60, 0x06, 0x21, 0x74,
+ 0x1b, 0x26, 0x69, 0x29, 0xdb, 0x51, 0x32, 0x3e, 0x2f, 0x7f, 0xb2, 0x3e, 0xb7, 0xbf, 0x57, 0x9e,
+ 0x5c, 0x4b, 0xc0, 0xf2, 0x48, 0xa7, 0x88, 0x64, 0xe4, 0x63, 0x1b, 0xea, 0x35, 0x1f, 0x9b, 0xfd,
+ 0x55, 0x0b, 0x4e, 0xd1, 0x0b, 0xae, 0x7e, 0x3d, 0x47, 0x8b, 0xee, 0xb4, 0x5c, 0xae, 0xa7, 0x11,
+ 0x57, 0x0d, 0x93, 0xd5, 0x55, 0x56, 0xb8, 0x96, 0x46, 0x41, 0xe9, 0x09, 0xbf, 0xed, 0x7a, 0xf5,
+ 0xe4, 0x09, 0x7f, 0xcd, 0xf5, 0xea, 0x98, 0x41, 0xd4, 0x95, 0x55, 0xcc, 0x8d, 0xf7, 0xff, 0x2d,
+ 0xba, 0x57, 0x69, 0x5f, 0x7e, 0xa8, 0xdd, 0x40, 0xcf, 0xe9, 0x3a, 0x55, 0x61, 0x3e, 0x99, 0xab,
+ 0x4f, 0xfd, 0x8a, 0x05, 0xc2, 0xfb, 0xbd, 0x87, 0x3b, 0xf9, 0x5d, 0x18, 0xdd, 0x49, 0x67, 0x19,
+ 0x3e, 0x97, 0x1f, 0x0e, 0x40, 0xe4, 0x16, 0x56, 0x2c, 0xba, 0x91, 0x51, 0xd8, 0xa0, 0x65, 0xd7,
+ 0x41, 0x40, 0x17, 0x09, 0xd3, 0x6a, 0x74, 0xef, 0xcd, 0x8b, 0x00, 0x75, 0x86, 0xcb, 0xf2, 0x6d,
+ 0x15, 0x4c, 0x8e, 0x6b, 0x51, 0x41, 0xb0, 0x86, 0x65, 0xff, 0x42, 0x11, 0x46, 0x64, 0x56, 0xdb,
+ 0xb6, 0xd7, 0x8b, 0xec, 0x51, 0x67, 0x9c, 0x0a, 0x5d, 0x19, 0xa7, 0xf7, 0x60, 0x2a, 0x20, 0xb5,
+ 0x76, 0x10, 0xba, 0x3b, 0x44, 0x82, 0xc5, 0x26, 0x99, 0xe5, 0xe9, 0x26, 0x12, 0xc0, 0x03, 0x16,
+ 0x22, 0x2b, 0x51, 0xc8, 0x94, 0xc6, 0x69, 0x42, 0xe8, 0x22, 0x0c, 0x33, 0xd1, 0x7b, 0x25, 0x16,
+ 0x08, 0x2b, 0xc1, 0xd7, 0xaa, 0x04, 0xe0, 0x18, 0x87, 0x3d, 0x0e, 0xda, 0x77, 0xb4, 0x64, 0x68,
+ 0xf1, 0xe3, 0x80, 0x17, 0x63, 0x09, 0x47, 0x9f, 0x86, 0x49, 0x5e, 0x2f, 0xf0, 0x5b, 0xce, 0x26,
+ 0x57, 0x09, 0xf6, 0xab, 0xf0, 0x3a, 0x93, 0xab, 0x09, 0xd8, 0xc1, 0x5e, 0xf9, 0x58, 0xb2, 0x8c,
+ 0x75, 0x3b, 0x45, 0x85, 0x59, 0xfe, 0xf1, 0x46, 0xe8, 0x9d, 0x91, 0x32, 0x18, 0x8c, 0x41, 0x58,
+ 0xc7, 0xb3, 0xff, 0xd6, 0x82, 0x29, 0x6d, 0xaa, 0x7a, 0xce, 0xf8, 0x61, 0x0c, 0x52, 0xa1, 0x87,
+ 0x41, 0x3a, 0x5c, 0xb4, 0x87, 0xcc, 0x19, 0xee, 0x7b, 0x48, 0x33, 0x6c, 0x7f, 0x01, 0x50, 0x3a,
+ 0x65, 0x32, 0x7a, 0x9b, 0x1b, 0xf2, 0xbb, 0x01, 0xa9, 0x77, 0x52, 0xf8, 0xeb, 0x91, 0x73, 0xa4,
+ 0xe7, 0x2a, 0xaf, 0x85, 0x55, 0x7d, 0xfb, 0x6f, 0xfb, 0x60, 0x32, 0x19, 0xab, 0x03, 0x5d, 0x85,
+ 0x01, 0xce, 0xa5, 0x0b, 0xf2, 0x1d, 0xec, 0xc9, 0xb4, 0x08, 0x1f, 0x3c, 0x1b, 0x0f, 0xe7, 0xee,
+ 0x45, 0x7d, 0xf4, 0x1e, 0x8c, 0xd4, 0xfd, 0xbb, 0xde, 0x5d, 0x27, 0xa8, 0xcf, 0x55, 0x56, 0xc4,
+ 0x09, 0x91, 0x29, 0x80, 0x5a, 0x8c, 0xd1, 0xf4, 0xa8, 0x21, 0xcc, 0x76, 0x22, 0x06, 0x61, 0x9d,
+ 0x1c, 0x5a, 0x67, 0x09, 0xa2, 0x36, 0xdc, 0xcd, 0x55, 0xa7, 0xd5, 0xc9, 0xab, 0x6b, 0x41, 0x22,
+ 0x69, 0x94, 0xc7, 0x44, 0x16, 0x29, 0x0e, 0xc0, 0x31, 0x21, 0xf4, 0x25, 0x98, 0x0e, 0x73, 0x54,
+ 0x62, 0x79, 0x19, 0xf4, 0x3b, 0x69, 0x89, 0xb8, 0x30, 0x25, 0x4b, 0x79, 0x96, 0xd5, 0x0c, 0xba,
+ 0x07, 0x48, 0x88, 0x9e, 0xd7, 0x83, 0x76, 0x18, 0xf1, 0x2c, 0x84, 0xe2, 0xd1, 0xf5, 0xf1, 0x6c,
+ 0x39, 0x41, 0x12, 0x5b, 0x6b, 0x9b, 0x05, 0x28, 0x4e, 0x63, 0xe0, 0x8c, 0x36, 0xd0, 0x16, 0x8c,
+ 0xb7, 0x8c, 0x04, 0x90, 0x6c, 0x6f, 0xe6, 0x44, 0xf1, 0xcd, 0x4b, 0x15, 0xc9, 0x6f, 0x69, 0x13,
+ 0x8a, 0x13, 0x74, 0xed, 0xaf, 0xf4, 0xc1, 0x8c, 0xcc, 0x86, 0x9e, 0xe1, 0x27, 0xf3, 0x65, 0x2b,
+ 0xe1, 0x28, 0xf3, 0x5a, 0xfe, 0x95, 0xf2, 0xc8, 0xdc, 0x65, 0xbe, 0x96, 0x76, 0x97, 0x79, 0xe3,
+ 0x90, 0xdd, 0x78, 0x68, 0x4e, 0x33, 0x3f, 0xb6, 0x9e, 0x2e, 0xfb, 0xc7, 0xc0, 0x60, 0x02, 0x10,
+ 0xe6, 0x71, 0xe6, 0x2b, 0x52, 0x49, 0x95, 0x23, 0x68, 0xb8, 0x2a, 0x70, 0x0c, 0xb6, 0x62, 0x54,
+ 0x46, 0xa3, 0x67, 0x27, 0xba, 0xa2, 0x43, 0x69, 0x92, 0x66, 0x2b, 0xda, 0x5d, 0x74, 0x03, 0xd1,
+ 0xe3, 0x4c, 0x9a, 0x4b, 0x02, 0x27, 0x4d, 0x53, 0x42, 0xb0, 0xa2, 0x83, 0x76, 0x60, 0x6a, 0x93,
+ 0xc5, 0x96, 0xd2, 0x12, 0x93, 0x8b, 0x13, 0x28, 0xf3, 0x84, 0xb8, 0xb2, 0xb0, 0x94, 0x9f, 0xc5,
+ 0x9c, 0x3f, 0x33, 0x53, 0x28, 0x38, 0xdd, 0x04, 0xdd, 0x1a, 0xc7, 0x9c, 0xbb, 0xe1, 0x52, 0xc3,
+ 0x09, 0x23, 0xb7, 0x36, 0xdf, 0xf0, 0x6b, 0xdb, 0xd5, 0xc8, 0x0f, 0x64, 0x62, 0xcb, 0xcc, 0x57,
+ 0xde, 0xdc, 0xed, 0x6a, 0x0a, 0xdf, 0x68, 0x9e, 0x25, 0x58, 0xcd, 0xc2, 0xc2, 0x99, 0x6d, 0xa1,
+ 0x35, 0x18, 0xdc, 0x74, 0x23, 0x4c, 0x5a, 0xbe, 0x38, 0x97, 0x32, 0x0f, 0xdd, 0x2b, 0x1c, 0xc5,
+ 0x68, 0x89, 0xc5, 0xbe, 0x12, 0x00, 0x2c, 0x89, 0xa0, 0xb7, 0xd5, 0x75, 0x33, 0x90, 0x2f, 0xea,
+ 0x4d, 0x5b, 0xf9, 0x65, 0x5e, 0x38, 0x6f, 0x42, 0xd1, 0xdb, 0x08, 0x3b, 0x45, 0xfd, 0x59, 0x5b,
+ 0x36, 0x24, 0x75, 0xf3, 0x83, 0xf4, 0x11, 0xbe, 0xb6, 0x5c, 0xc5, 0xb4, 0x22, 0x73, 0xb0, 0x0d,
+ 0x6b, 0xa1, 0x2b, 0x92, 0x64, 0x65, 0xfa, 0x1b, 0xaf, 0x54, 0x17, 0xaa, 0x2b, 0x06, 0x0d, 0x16,
+ 0x3f, 0x91, 0x15, 0x63, 0x5e, 0x1d, 0xdd, 0x82, 0xe1, 0x4d, 0x7e, 0xc4, 0x6e, 0xf0, 0xf0, 0xb1,
+ 0x39, 0xd7, 0xde, 0x15, 0x89, 0x64, 0xd0, 0x63, 0x97, 0x93, 0x02, 0xe1, 0x98, 0x14, 0xfa, 0x8a,
+ 0x05, 0xc7, 0x5b, 0x09, 0x59, 0x2d, 0x73, 0x8b, 0x13, 0x06, 0x71, 0x99, 0xae, 0x06, 0x95, 0xac,
+ 0x0a, 0x46, 0x83, 0x4c, 0xd1, 0x93, 0x89, 0x86, 0xb3, 0x9b, 0xa3, 0x03, 0x1d, 0xdc, 0xa9, 0x77,
+ 0xca, 0xab, 0x94, 0x08, 0x81, 0xc4, 0x07, 0x1a, 0xcf, 0x2f, 0x62, 0x5a, 0x11, 0xad, 0x03, 0x6c,
+ 0x34, 0x88, 0x88, 0x2d, 0x29, 0xcc, 0xaf, 0x32, 0xf9, 0x8c, 0x65, 0x85, 0x25, 0xe8, 0xb0, 0x37,
+ 0x6f, 0x5c, 0x8a, 0x35, 0x3a, 0x74, 0x29, 0xd5, 0x5c, 0xaf, 0x4e, 0x02, 0xa6, 0x46, 0xcb, 0x59,
+ 0x4a, 0x0b, 0x0c, 0x23, 0xbd, 0x94, 0x78, 0x39, 0x16, 0x14, 0x18, 0x2d, 0xd2, 0xda, 0xda, 0x08,
+ 0x3b, 0x65, 0xf0, 0x58, 0x20, 0xad, 0xad, 0xc4, 0x82, 0xe2, 0xb4, 0x58, 0x39, 0x16, 0x14, 0xe8,
+ 0x96, 0xd9, 0xa0, 0x1b, 0x88, 0x04, 0xa5, 0x89, 0xfc, 0x2d, 0xb3, 0xcc, 0x51, 0xd2, 0x5b, 0x46,
+ 0x00, 0xb0, 0x24, 0x82, 0x3e, 0x6f, 0xf2, 0x55, 0x93, 0x8c, 0xe6, 0x73, 0x5d, 0xf8, 0x2a, 0x83,
+ 0x6e, 0x67, 0xce, 0xea, 0x35, 0x28, 0x6c, 0xd4, 0x98, 0xfa, 0x2d, 0x47, 0x3b, 0xb1, 0xbc, 0x60,
+ 0x50, 0x63, 0x11, 0xf1, 0x97, 0x17, 0x70, 0x61, 0xa3, 0x46, 0x97, 0xbe, 0x73, 0xbf, 0x1d, 0x90,
+ 0x65, 0xb7, 0x41, 0x44, 0x36, 0x8f, 0xcc, 0xa5, 0x3f, 0x27, 0x91, 0xd2, 0x4b, 0x5f, 0x81, 0x70,
+ 0x4c, 0x8a, 0xd2, 0x8d, 0xb9, 0xbd, 0xe9, 0x7c, 0xba, 0x8a, 0xa9, 0x4b, 0xd3, 0xcd, 0xe4, 0xf7,
+ 0xb6, 0x61, 0x6c, 0x27, 0x6c, 0x6d, 0x11, 0x79, 0x2a, 0x32, 0xc5, 0x60, 0x4e, 0x4c, 0x8c, 0x5b,
+ 0x02, 0xd1, 0x0d, 0xa2, 0xb6, 0xd3, 0x48, 0x1d, 0xe4, 0x4c, 0x88, 0x73, 0x4b, 0x27, 0x86, 0x4d,
+ 0xda, 0x74, 0x21, 0xbc, 0xcf, 0x03, 0xd7, 0x31, 0x15, 0x61, 0xce, 0x42, 0xc8, 0x88, 0x6d, 0xc7,
+ 0x17, 0x82, 0x00, 0x60, 0x49, 0x44, 0x0d, 0x36, 0xbb, 0x80, 0x4e, 0x74, 0x19, 0xec, 0x54, 0x7f,
+ 0xe3, 0xc1, 0x66, 0x17, 0x4e, 0x4c, 0x8a, 0x5d, 0x34, 0xad, 0x2d, 0x3f, 0xf2, 0xbd, 0xc4, 0x25,
+ 0x77, 0x32, 0xff, 0xa2, 0xa9, 0x64, 0xe0, 0xa7, 0x2f, 0x9a, 0x2c, 0x2c, 0x9c, 0xd9, 0x16, 0xfd,
+ 0xb8, 0x96, 0x8c, 0x41, 0x28, 0x32, 0x8e, 0x3c, 0x93, 0x13, 0xc2, 0x33, 0x1d, 0xa8, 0x90, 0x7f,
+ 0x9c, 0x02, 0xe1, 0x98, 0x14, 0xaa, 0x53, 0x4e, 0x57, 0x8f, 0x6d, 0xcb, 0x32, 0xa7, 0xe4, 0xf0,
+ 0x05, 0x59, 0x51, 0x70, 0x25, 0x97, 0xab, 0x43, 0x70, 0x82, 0x26, 0xb3, 0x11, 0xe4, 0x4e, 0x85,
+ 0x2c, 0xb1, 0x4a, 0xce, 0x54, 0x67, 0xf8, 0x1d, 0xf2, 0xa9, 0x16, 0x00, 0x2c, 0x89, 0xd0, 0xd1,
+ 0x10, 0xae, 0x70, 0x7e, 0xc8, 0xf2, 0x13, 0xe5, 0xa9, 0xf2, 0xb3, 0x14, 0x52, 0x32, 0xa0, 0xbb,
+ 0x00, 0xe1, 0x98, 0x14, 0x3d, 0xc9, 0xe9, 0x85, 0x77, 0x3a, 0xff, 0x24, 0x4f, 0x5e, 0x77, 0xec,
+ 0x24, 0xa7, 0x97, 0x5d, 0x51, 0x5c, 0x75, 0x2a, 0xfe, 0x38, 0xcb, 0xad, 0x92, 0xd3, 0x2f, 0x15,
+ 0xc0, 0x3c, 0xdd, 0x2f, 0x05, 0xc2, 0x31, 0x29, 0x76, 0x15, 0xb3, 0x20, 0x78, 0x67, 0x3b, 0x5c,
+ 0xc5, 0x14, 0x21, 0xe3, 0x2a, 0xd6, 0x82, 0xe4, 0xd9, 0xff, 0xb8, 0x00, 0x67, 0x3b, 0xef, 0xdb,
+ 0x58, 0x5b, 0x57, 0x89, 0xad, 0xa3, 0x12, 0xda, 0x3a, 0x2e, 0x3b, 0x8a, 0xb1, 0x7a, 0x0e, 0x6d,
+ 0x7c, 0x05, 0xa6, 0x94, 0xe3, 0x63, 0xc3, 0xad, 0xed, 0x6a, 0x09, 0x55, 0x55, 0x10, 0xa0, 0x6a,
+ 0x12, 0x01, 0xa7, 0xeb, 0xa0, 0x39, 0x98, 0x30, 0x0a, 0x57, 0x16, 0x85, 0xa0, 0x21, 0xce, 0x0a,
+ 0x62, 0x82, 0x71, 0x12, 0xdf, 0xfe, 0x45, 0x0b, 0x4e, 0xf2, 0x40, 0xbc, 0xa4, 0x5e, 0xf1, 0xeb,
+ 0x52, 0xa2, 0x70, 0xa8, 0xc8, 0xbd, 0x1b, 0x30, 0xd1, 0x32, 0xab, 0x76, 0x09, 0x36, 0xae, 0xa3,
+ 0xc6, 0x7d, 0x4d, 0x00, 0x70, 0x92, 0xa8, 0xfd, 0xf3, 0x05, 0x38, 0xd3, 0xd1, 0x92, 0x1f, 0x61,
+ 0x38, 0xb1, 0xd9, 0x0c, 0x9d, 0x38, 0x91, 0x7f, 0xb5, 0x45, 0x6a, 0x9a, 0xbe, 0x95, 0x99, 0xc4,
+ 0x5f, 0x59, 0xad, 0xce, 0xa5, 0x31, 0x70, 0x4e, 0x4d, 0xb4, 0x0c, 0x28, 0x0d, 0x11, 0x33, 0xcc,
+ 0x1e, 0xd3, 0x69, 0x7a, 0x38, 0xa3, 0x06, 0x7a, 0x15, 0xc6, 0x94, 0x87, 0x80, 0x36, 0xe3, 0xec,
+ 0x82, 0xc0, 0x3a, 0x00, 0x9b, 0x78, 0xe8, 0x12, 0x4f, 0x17, 0x25, 0x12, 0x8b, 0x09, 0xe5, 0xec,
+ 0x84, 0xcc, 0x05, 0x25, 0x8a, 0xb1, 0x8e, 0x33, 0x7f, 0xf9, 0x0f, 0xbe, 0x77, 0xf6, 0x63, 0x7f,
+ 0xf4, 0xbd, 0xb3, 0x1f, 0xfb, 0xd3, 0xef, 0x9d, 0xfd, 0xd8, 0x4f, 0xed, 0x9f, 0xb5, 0xfe, 0x60,
+ 0xff, 0xac, 0xf5, 0x47, 0xfb, 0x67, 0xad, 0x3f, 0xdd, 0x3f, 0x6b, 0xfd, 0x6f, 0xfb, 0x67, 0xad,
+ 0xaf, 0xff, 0xef, 0x67, 0x3f, 0xf6, 0x2e, 0x8a, 0x63, 0x61, 0x5f, 0xa4, 0xb3, 0x73, 0x71, 0xe7,
+ 0xd2, 0xbf, 0x0f, 0x00, 0x00, 0xff, 0xff, 0x92, 0x1b, 0xe7, 0xc0, 0x7a, 0x29, 0x01, 0x00,
}
func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) {
@@ -10681,6 +10752,18 @@ func (m *EnvVarSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.FileKeyRef != nil {
+ {
+ size, err := m.FileKeyRef.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
if m.SecretKeyRef != nil {
{
size, err := m.SecretKeyRef.MarshalToSizedBuffer(dAtA[:i])
@@ -11426,6 +11509,54 @@ func (m *FCVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *FileKeySelector) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *FileKeySelector) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *FileKeySelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Optional != nil {
+ i--
+ if *m.Optional {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x20
+ }
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Path)
+ copy(dAtA[i:], m.Path)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.VolumeName)
+ copy(dAtA[i:], m.VolumeName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
func (m *FlexPersistentVolumeSource) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -15791,6 +15922,59 @@ func (m *PodAttachOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *PodCertificateProjection) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PodCertificateProjection) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodCertificateProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.CertificateChainPath)
+ copy(dAtA[i:], m.CertificateChainPath)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.CertificateChainPath)))
+ i--
+ dAtA[i] = 0x32
+ i -= len(m.KeyPath)
+ copy(dAtA[i:], m.KeyPath)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.KeyPath)))
+ i--
+ dAtA[i] = 0x2a
+ i -= len(m.CredentialBundlePath)
+ copy(dAtA[i:], m.CredentialBundlePath)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.CredentialBundlePath)))
+ i--
+ dAtA[i] = 0x22
+ if m.MaxExpirationSeconds != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxExpirationSeconds))
+ i--
+ dAtA[i] = 0x18
+ }
+ i -= len(m.KeyType)
+ copy(dAtA[i:], m.KeyType)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.KeyType)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.SignerName)
+ copy(dAtA[i:], m.SignerName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.SignerName)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
func (m *PodCondition) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -16597,6 +16781,15 @@ func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.HostnameOverride != nil {
+ i -= len(*m.HostnameOverride)
+ copy(dAtA[i:], *m.HostnameOverride)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HostnameOverride)))
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0xca
+ }
if m.Resources != nil {
{
size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i])
@@ -21108,6 +21301,18 @@ func (m *VolumeProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.PodCertificate != nil {
+ {
+ size, err := m.PodCertificate.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
if m.ClusterTrustBundle != nil {
{
size, err := m.ClusterTrustBundle.MarshalToSizedBuffer(dAtA[:i])
@@ -22896,6 +23101,10 @@ func (m *EnvVarSource) Size() (n int) {
l = m.SecretKeyRef.Size()
n += 1 + l + sovGenerated(uint64(l))
}
+ if m.FileKeyRef != nil {
+ l = m.FileKeyRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -23149,6 +23358,24 @@ func (m *FCVolumeSource) Size() (n int) {
return n
}
+func (m *FileKeySelector) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.VolumeName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Path)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Key)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Optional != nil {
+ n += 2
+ }
+ return n
+}
+
func (m *FlexPersistentVolumeSource) Size() (n int) {
if m == nil {
return 0
@@ -24752,6 +24979,28 @@ func (m *PodAttachOptions) Size() (n int) {
return n
}
+func (m *PodCertificateProjection) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.SignerName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.KeyType)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.MaxExpirationSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.MaxExpirationSeconds))
+ }
+ l = len(m.CredentialBundlePath)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.KeyPath)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.CertificateChainPath)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
func (m *PodCondition) Size() (n int) {
if m == nil {
return 0
@@ -25224,6 +25473,10 @@ func (m *PodSpec) Size() (n int) {
l = m.Resources.Size()
n += 2 + l + sovGenerated(uint64(l))
}
+ if m.HostnameOverride != nil {
+ l = len(*m.HostnameOverride)
+ n += 2 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -26751,6 +27004,10 @@ func (m *VolumeProjection) Size() (n int) {
l = m.ClusterTrustBundle.Size()
n += 1 + l + sovGenerated(uint64(l))
}
+ if m.PodCertificate != nil {
+ l = m.PodCertificate.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -27777,6 +28034,7 @@ func (this *EnvVarSource) String() string {
`ResourceFieldRef:` + strings.Replace(this.ResourceFieldRef.String(), "ResourceFieldSelector", "ResourceFieldSelector", 1) + `,`,
`ConfigMapKeyRef:` + strings.Replace(this.ConfigMapKeyRef.String(), "ConfigMapKeySelector", "ConfigMapKeySelector", 1) + `,`,
`SecretKeyRef:` + strings.Replace(this.SecretKeyRef.String(), "SecretKeySelector", "SecretKeySelector", 1) + `,`,
+ `FileKeyRef:` + strings.Replace(this.FileKeyRef.String(), "FileKeySelector", "FileKeySelector", 1) + `,`,
`}`,
}, "")
return s
@@ -27951,6 +28209,19 @@ func (this *FCVolumeSource) String() string {
}, "")
return s
}
+func (this *FileKeySelector) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&FileKeySelector{`,
+ `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`,
+ `Path:` + fmt.Sprintf("%v", this.Path) + `,`,
+ `Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+ `Optional:` + valueToStringGenerated(this.Optional) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *FlexPersistentVolumeSource) String() string {
if this == nil {
return "nil"
@@ -29169,6 +29440,21 @@ func (this *PodAttachOptions) String() string {
}, "")
return s
}
+func (this *PodCertificateProjection) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&PodCertificateProjection{`,
+ `SignerName:` + fmt.Sprintf("%v", this.SignerName) + `,`,
+ `KeyType:` + fmt.Sprintf("%v", this.KeyType) + `,`,
+ `MaxExpirationSeconds:` + valueToStringGenerated(this.MaxExpirationSeconds) + `,`,
+ `CredentialBundlePath:` + fmt.Sprintf("%v", this.CredentialBundlePath) + `,`,
+ `KeyPath:` + fmt.Sprintf("%v", this.KeyPath) + `,`,
+ `CertificateChainPath:` + fmt.Sprintf("%v", this.CertificateChainPath) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *PodCondition) String() string {
if this == nil {
return "nil"
@@ -29503,6 +29789,7 @@ func (this *PodSpec) String() string {
`SchedulingGates:` + repeatedStringForSchedulingGates + `,`,
`ResourceClaims:` + repeatedStringForResourceClaims + `,`,
`Resources:` + strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1) + `,`,
+ `HostnameOverride:` + valueToStringGenerated(this.HostnameOverride) + `,`,
`}`,
}, "")
return s
@@ -30673,6 +30960,7 @@ func (this *VolumeProjection) String() string {
`ConfigMap:` + strings.Replace(this.ConfigMap.String(), "ConfigMapProjection", "ConfigMapProjection", 1) + `,`,
`ServiceAccountToken:` + strings.Replace(this.ServiceAccountToken.String(), "ServiceAccountTokenProjection", "ServiceAccountTokenProjection", 1) + `,`,
`ClusterTrustBundle:` + strings.Replace(this.ClusterTrustBundle.String(), "ClusterTrustBundleProjection", "ClusterTrustBundleProjection", 1) + `,`,
+ `PodCertificate:` + strings.Replace(this.PodCertificate.String(), "PodCertificateProjection", "PodCertificateProjection", 1) + `,`,
`}`,
}, "")
return s
@@ -39943,6 +40231,42 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FileKeyRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.FileKeyRef == nil {
+ m.FileKeyRef = &FileKeySelector{}
+ }
+ if err := m.FileKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -41822,61 +42146,247 @@ func (m *EventSource) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Host = string(dAtA[iNdEx:postIndex])
+ m.Host = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ExecAction) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ExecAction: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ExecAction: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Command = append(m.Command, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *FCVolumeSource) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: FCVolumeSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: FCVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TargetWWNs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.TargetWWNs = append(m.TargetWWNs, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Lun = &v
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.FSType = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ExecAction) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
}
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ExecAction: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ExecAction: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
+ m.ReadOnly = bool(v != 0)
+ case 5:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field WWIDs", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -41904,7 +42414,7 @@ func (m *ExecAction) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Command = append(m.Command, string(dAtA[iNdEx:postIndex]))
+ m.WWIDs = append(m.WWIDs, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -41927,7 +42437,7 @@ func (m *ExecAction) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *FCVolumeSource) Unmarshal(dAtA []byte) error {
+func (m *FileKeySelector) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -41950,15 +42460,15 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: FCVolumeSource: wiretype end group for non-group")
+ return fmt.Errorf("proto: FileKeySelector: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: FCVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: FileKeySelector: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field TargetWWNs", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -41986,13 +42496,13 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.TargetWWNs = append(m.TargetWWNs, string(dAtA[iNdEx:postIndex]))
+ m.VolumeName = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType)
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
}
- var v int32
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -42002,15 +42512,27 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- v |= int32(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- m.Lun = &v
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -42038,11 +42560,11 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.FSType = string(dAtA[iNdEx:postIndex])
+ m.Key = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4:
if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
@@ -42059,39 +42581,8 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error {
break
}
}
- m.ReadOnly = bool(v != 0)
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field WWIDs", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.WWIDs = append(m.WWIDs, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
+ b := bool(v != 0)
+ m.Optional = &b
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -55916,17 +56407,135 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: PodAntiAffinity: wiretype end group for non-group")
+ return fmt.Errorf("proto: PodAntiAffinity: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PodAntiAffinity: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RequiredDuringSchedulingIgnoredDuringExecution = append(m.RequiredDuringSchedulingIgnoredDuringExecution, PodAffinityTerm{})
+ if err := m.RequiredDuringSchedulingIgnoredDuringExecution[len(m.RequiredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, WeightedPodAffinityTerm{})
+ if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PodAttachOptions) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PodAttachOptions: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: PodAntiAffinity: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: PodAttachOptions: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType)
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType)
}
- var msglen int
+ var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -55936,31 +56545,77 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
- return ErrInvalidLengthGenerated
+ m.Stdin = bool(v != 0)
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType)
}
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- if postIndex > l {
- return io.ErrUnexpectedEOF
+ m.Stdout = bool(v != 0)
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType)
}
- m.RequiredDuringSchedulingIgnoredDuringExecution = append(m.RequiredDuringSchedulingIgnoredDuringExecution, PodAffinityTerm{})
- if err := m.RequiredDuringSchedulingIgnoredDuringExecution[len(m.RequiredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- iNdEx = postIndex
- case 2:
+ m.Stderr = bool(v != 0)
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TTY = bool(v != 0)
+ case 5:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -55970,25 +56625,23 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, WeightedPodAffinityTerm{})
- if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.Container = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -56011,7 +56664,7 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *PodAttachOptions) Unmarshal(dAtA []byte) error {
+func (m *PodCertificateProjection) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -56034,17 +56687,17 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: PodAttachOptions: wiretype end group for non-group")
+ return fmt.Errorf("proto: PodCertificateProjection: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: PodAttachOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: PodCertificateProjection: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType)
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType)
}
- var v int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -56054,17 +56707,29 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- v |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- m.Stdin = bool(v != 0)
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SignerName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType)
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KeyType", wireType)
}
- var v int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -56074,17 +56739,29 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- v |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- m.Stdout = bool(v != 0)
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.KeyType = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
case 3:
if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxExpirationSeconds", wireType)
}
- var v int
+ var v int32
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -56094,17 +56771,17 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- v |= int(b&0x7F) << shift
+ v |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
- m.Stderr = bool(v != 0)
+ m.MaxExpirationSeconds = &v
case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType)
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CredentialBundlePath", wireType)
}
- var v int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -56114,15 +56791,27 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- v |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- m.TTY = bool(v != 0)
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CredentialBundlePath = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
case 5:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field KeyPath", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -56150,7 +56839,39 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Container = string(dAtA[iNdEx:postIndex])
+ m.KeyPath = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CertificateChainPath", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CertificateChainPath = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -60088,6 +60809,39 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 41:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HostnameOverride", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.HostnameOverride = &s
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -73489,6 +74243,42 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PodCertificate", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.PodCertificate == nil {
+ m.PodCertificate = &PodCertificateProjection{}
+ }
+ if err := m.PodCertificate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
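(Aside, not part of the diff: every generated `Unmarshal` hunk above repeats the same hand-rolled pattern — read a base-128 varint, split it into field number and wire type, then dispatch on the field. A minimal standalone sketch of that loop, with illustrative names that do not come from the vendored file:)

```go
package main

import (
	"errors"
	"fmt"
)

// decodeVarint reads a protobuf base-128 varint starting at data[i],
// mirroring the shift/accumulate loop used by the generated Unmarshal code.
func decodeVarint(data []byte, i int) (v uint64, next int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, i, errors.New("varint overflows 64 bits")
		}
		if i >= len(data) {
			return 0, i, errors.New("unexpected EOF")
		}
		b := data[i]
		i++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: last byte of the varint
			return v, i, nil
		}
	}
}

func main() {
	// Tag byte for field 5, wire type 2 (length-delimited),
	// e.g. the Container field of PodAttachOptions above.
	raw := []byte{0x2A}
	wire, _, err := decodeVarint(raw, 0)
	if err != nil {
		panic(err)
	}
	fieldNum := int32(wire >> 3) // -> 5
	wireType := int(wire & 0x7)  // -> 2
	fmt.Println(fieldNum, wireType)
}
```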
diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto
index 9b48fb1c3..5abfaffdc 100644
--- a/vendor/k8s.io/api/core/v1/generated.proto
+++ b/vendor/k8s.io/api/core/v1/generated.proto
@@ -737,8 +737,8 @@ message Container {
repeated ContainerPort ports = 6;
// List of sources to populate environment variables in the container.
- // The keys defined within a source must be a C_IDENTIFIER. All invalid keys
- // will be reported as an event when the container is starting. When a key exists in multiple
+ // The keys defined within a source may consist of any printable ASCII characters except '='.
+ // When a key exists in multiple
// sources, the value associated with the last source will take precedence.
// Values defined by an Env with a duplicate key will take precedence.
// Cannot be updated.
@@ -1344,7 +1344,8 @@ message EndpointsList {
// EnvFromSource represents the source of a set of ConfigMaps or Secrets
message EnvFromSource {
- // Optional text to prepend to the name of each environment variable. Must be a C_IDENTIFIER.
+ // Optional text to prepend to the name of each environment variable.
+ // May consist of any printable ASCII characters except '='.
// +optional
optional string prefix = 1;
@@ -1359,7 +1360,8 @@ message EnvFromSource {
// EnvVar represents an environment variable present in a Container.
message EnvVar {
- // Name of the environment variable. Must be a C_IDENTIFIER.
+ // Name of the environment variable.
+ // May consist of any printable ASCII characters except '='.
optional string name = 1;
// Variable references $(VAR_NAME) are expanded
@@ -1398,6 +1400,13 @@ message EnvVarSource {
// Selects a key of a secret in the pod's namespace
// +optional
optional SecretKeySelector secretKeyRef = 4;
+
+ // FileKeyRef selects a key of the env file.
+ // Requires the EnvFiles feature gate to be enabled.
+ //
+ // +featureGate=EnvFiles
+ // +optional
+ optional FileKeySelector fileKeyRef = 5;
}
// An EphemeralContainer is a temporary container that you may add to an existing Pod for
@@ -1479,8 +1488,8 @@ message EphemeralContainerCommon {
repeated ContainerPort ports = 6;
// List of sources to populate environment variables in the container.
- // The keys defined within a source must be a C_IDENTIFIER. All invalid keys
- // will be reported as an event when the container is starting. When a key exists in multiple
+ // The keys defined within a source may consist of any printable ASCII characters except '='.
+ // When a key exists in multiple
// sources, the value associated with the last source will take precedence.
// Values defined by an Env with a duplicate key will take precedence.
// Cannot be updated.
@@ -1776,6 +1785,36 @@ message FCVolumeSource {
repeated string wwids = 5;
}
+// FileKeySelector selects a key of the env file.
+// +structType=atomic
+message FileKeySelector {
+ // The name of the volume mount containing the env file.
+ // +required
+ optional string volumeName = 1;
+
+ // The path within the volume from which to select the file.
+ // Must be relative and may not contain the '..' path or start with '..'.
+ // +required
+ optional string path = 2;
+
+ // The key within the env file. An invalid key will prevent the pod from starting.
+ // The keys defined within a source may consist of any printable ASCII characters except '='.
+ // During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
+ // +required
+ optional string key = 3;
+
+ // Specify whether the file or its key must be defined. If the file or key
+ // does not exist, then the env var is not published.
+ // If optional is set to true and the specified key does not exist,
+ // the environment variable will not be set in the Pod's containers.
+ //
+ // If optional is set to false and the specified key does not exist,
+ // an error will be returned during Pod creation.
+ // +optional
+ // +default=false
+ optional bool optional = 4;
+}
+
// FlexPersistentVolumeSource represents a generic persistent volume resource that is
// provisioned/attached using an exec based plugin.
message FlexPersistentVolumeSource {
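(Aside, not part of the diff: a rough sketch of how the new `fileKeyRef` source added above is meant to be wired up from Go client code. The volume name, file path, and key are made up for illustration, and this only compiles against the bumped k8s.io/api version that includes `FileKeySelector`:)

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	optional := false
	// Hypothetical example: populate DB_PASSWORD from the key "DB_PASSWORD"
	// in an env file "app.env" inside the volume mount named "config".
	env := corev1.EnvVar{
		Name: "DB_PASSWORD",
		ValueFrom: &corev1.EnvVarSource{
			FileKeyRef: &corev1.FileKeySelector{
				VolumeName: "config",
				Path:       "app.env",
				Key:        "DB_PASSWORD",
				Optional:   &optional,
			},
		},
	}
	fmt.Printf("%+v\n", env)
}
```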
@@ -1949,7 +1988,6 @@ message GlusterfsPersistentVolumeSource {
// Glusterfs volumes do not support ownership management or SELinux relabeling.
message GlusterfsVolumeSource {
// endpoints is the endpoint name that details Glusterfs topology.
- // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
optional string endpoints = 1;
// path is the Glusterfs volume path.
@@ -3160,15 +3198,13 @@ message PersistentVolumeClaimSpec {
// volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
// If specified, the CSI driver will create or update the volume with the attributes defined
// in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
- // it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
- // will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
- // If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
- // will be set by the persistentvolume controller if it exists.
+ // it can be changed after the claim is created. An empty string or nil value indicates that no
+ // VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state,
+ // this field can be reset to its previous value (including nil) to cancel the modification.
// If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
// set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
// exists.
// More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
- // (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
// +featureGate=VolumeAttributesClass
// +optional
optional string volumeAttributesClassName = 9;
@@ -3267,14 +3303,12 @@ message PersistentVolumeClaimStatus {
// currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using.
// When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim
- // This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
// +featureGate=VolumeAttributesClass
// +optional
optional string currentVolumeAttributesClassName = 8;
// ModifyVolumeStatus represents the status object of ControllerModifyVolume operation.
// When this is unset, there is no ModifyVolume operation being attempted.
- // This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
// +featureGate=VolumeAttributesClass
// +optional
optional ModifyVolumeStatus modifyVolumeStatus = 9;
@@ -3515,7 +3549,6 @@ message PersistentVolumeSpec {
// after a volume has been updated successfully to a new class.
// For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound
// PersistentVolumeClaims during the binding process.
- // This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
// +featureGate=VolumeAttributesClass
// +optional
optional string volumeAttributesClassName = 10;
@@ -3684,8 +3717,8 @@ message PodAntiAffinity {
// most preferred is the one with the greatest sum of weights, i.e.
// for each node that meets all of the scheduling requirements (resource
// request, requiredDuringScheduling anti-affinity expressions, etc.),
- // compute a sum by iterating through the elements of this field and adding
- // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ // compute a sum by iterating through the elements of this field and subtracting
+ // "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the
// node(s) with the highest sum are the most preferred.
// +optional
// +listType=atomic
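(Aside, not part of the diff: a toy illustration of the corrected preferred anti-affinity scoring rule described above — each node starts at zero and, for every weighted term whose matching pods it already hosts, that term's weight is subtracted; the node with the highest resulting sum is preferred. This is only a sketch of the rule, not the scheduler's actual implementation:)

```go
package main

import "fmt"

func main() {
	weights := []int32{100, 20} // weights of two preferred anti-affinity terms
	nodes := []string{"node-a", "node-b"}
	matchesByNode := map[string][]bool{
		"node-a": {true, false},  // already hosts pods matching term 0
		"node-b": {false, false}, // hosts no matching pods
	}
	for _, node := range nodes {
		var sum int32
		for i, matched := range matchesByNode[node] {
			if matched {
				sum -= weights[i] // subtract the term's weight
			}
		}
		fmt.Printf("%s anti-affinity contribution: %d\n", node, sum)
	}
}
```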
@@ -3725,6 +3758,79 @@ message PodAttachOptions {
optional string container = 5;
}
+// PodCertificateProjection provides a private key and X.509 certificate in the
+// pod filesystem.
+message PodCertificateProjection {
+ // Kubelet's generated CSRs will be addressed to this signer.
+ //
+ // +required
+ optional string signerName = 1;
+
+ // The type of keypair Kubelet will generate for the pod.
+ //
+ // Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384",
+ // "ECDSAP521", and "ED25519".
+ //
+ // +required
+ optional string keyType = 2;
+
+ // maxExpirationSeconds is the maximum lifetime permitted for the
+ // certificate.
+ //
+ // Kubelet copies this value verbatim into the PodCertificateRequests it
+ // generates for this projection.
+ //
+ // If omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver
+ // will reject values shorter than 3600 (1 hour). The maximum allowable
+ // value is 7862400 (91 days).
+ //
+ // The signer implementation is then free to issue a certificate with any
+ // lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
+ // seconds (1 hour). This constraint is enforced by kube-apiserver.
+ // `kubernetes.io` signers will never issue certificates with a lifetime
+ // longer than 24 hours.
+ //
+ // +optional
+ optional int32 maxExpirationSeconds = 3;
+
+ // Write the credential bundle at this path in the projected volume.
+ //
+ // The credential bundle is a single file that contains multiple PEM blocks.
+ // The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private
+ // key.
+ //
+ // The remaining blocks are CERTIFICATE blocks, containing the issued
+ // certificate chain from the signer (leaf and any intermediates).
+ //
+ // Using credentialBundlePath lets your Pod's application code make a single
+ // atomic read that retrieves a consistent key and certificate chain. If you
+ // project them to separate files, your application code will need to
+ // additionally check that the leaf certificate was issued to the key.
+ //
+ // +optional
+ optional string credentialBundlePath = 4;
+
+ // Write the key at this path in the projected volume.
+ //
+ // Most applications should use credentialBundlePath. When using keyPath
+ // and certificateChainPath, your application needs to check that the key
+ // and leaf certificate are consistent, because it is possible to read the
+ // files mid-rotation.
+ //
+ // +optional
+ optional string keyPath = 5;
+
+ // Write the certificate chain at this path in the projected volume.
+ //
+ // Most applications should use credentialBundlePath. When using keyPath
+ // and certificateChainPath, your application needs to check that the key
+ // and leaf certificate are consistent, because it is possible to read the
+ // files mid-rotation.
+ //
+ // +optional
+ optional string certificateChainPath = 6;
+}
+
// PodCondition contains details for the current condition of this pod.
message PodCondition {
// Type is the type of the condition.
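(Aside, not part of the diff: a minimal sketch of projecting the new `PodCertificateProjection` into a volume, assuming the bumped k8s.io/api version. The signer name is made up; `ECDSAP256` is one of the valid key types listed in the comments above:)

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	maxExp := int32(86400) // 24 hours, the default described above
	// Hypothetical projected volume requesting a credential bundle from a
	// made-up signer "example.com/my-signer".
	vol := corev1.Volume{
		Name: "pod-cert",
		VolumeSource: corev1.VolumeSource{
			Projected: &corev1.ProjectedVolumeSource{
				Sources: []corev1.VolumeProjection{{
					PodCertificate: &corev1.PodCertificateProjection{
						SignerName:           "example.com/my-signer",
						KeyType:              "ECDSAP256",
						MaxExpirationSeconds: &maxExp,
						CredentialBundlePath: "credentials.pem",
					},
				}},
			},
		},
	}
	fmt.Printf("%+v\n", vol)
}
```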
@@ -4269,7 +4375,9 @@ message PodSpec {
optional string nodeName = 10;
// Host networking requested for this pod. Use the host's network namespace.
- // If this option is set, the ports that will be used must be specified.
+ // When using HostNetwork you should specify ports so the scheduler is aware.
+ // When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`,
+ // and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`.
// Default to false.
// +k8s:conversion-gen=false
// +optional
@@ -4516,6 +4624,21 @@ message PodSpec {
// +featureGate=PodLevelResources
// +optional
optional ResourceRequirements resources = 40;
+
+ // HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod.
+ // This field only specifies the pod's hostname and does not affect its DNS records.
+ // When this field is set to a non-empty string:
+ // - It takes precedence over the values set in `hostname` and `subdomain`.
+ // - The Pod's hostname will be set to this value.
+ // - `setHostnameAsFQDN` must be nil or set to false.
+ // - `hostNetwork` must be set to false.
+ //
+ // This field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters.
+ // Requires the HostnameOverride feature gate to be enabled.
+ //
+ // +featureGate=HostnameOverride
+ // +optional
+ optional string hostnameOverride = 41;
}
// PodStatus represents information about the status of a pod. Status may trail the actual
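(Aside, not part of the diff: a small sketch of the new `hostnameOverride` field, assuming the bumped k8s.io/api version. The hostname and image are placeholders; per the comment above, `setHostnameAsFQDN` must be unset or false and `hostNetwork` must be false when this field is used:)

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	hostname := "db-0.internal.example" // RFC 1123 subdomain, <= 64 characters
	spec := corev1.PodSpec{
		HostNetwork:      false,
		HostnameOverride: &hostname,
		Containers: []corev1.Container{
			{Name: "app", Image: "registry.example/app:1.0"},
		},
	}
	fmt.Println(*spec.HostnameOverride)
}
```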
@@ -6301,7 +6424,6 @@ message Taint {
optional string effect = 3;
// TimeAdded represents the time at which the taint was added.
- // It is only written for NoExecute taints.
// +optional
optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time timeAdded = 4;
}
@@ -6682,6 +6804,44 @@ message VolumeProjection {
// +featureGate=ClusterTrustBundleProjection
// +optional
optional ClusterTrustBundleProjection clusterTrustBundle = 5;
+
+ // Projects an auto-rotating credential bundle (private key and certificate
+ // chain) that the pod can use either as a TLS client or server.
+ //
+ // Kubelet generates a private key and uses it to send a
+ // PodCertificateRequest to the named signer. Once the signer approves the
+ // request and issues a certificate chain, Kubelet writes the key and
+ // certificate chain to the pod filesystem. The pod does not start until
+ // certificates have been issued for each podCertificate projected volume
+ // source in its spec.
+ //
+ // Kubelet will begin trying to rotate the certificate at the time indicated
+ // by the signer using the PodCertificateRequest.Status.BeginRefreshAt
+ // timestamp.
+ //
+ // Kubelet can write a single file, indicated by the credentialBundlePath
+ // field, or separate files, indicated by the keyPath and
+ // certificateChainPath fields.
+ //
+ // The credential bundle is a single file in PEM format. The first PEM
+ // entry is the private key (in PKCS#8 format), and the remaining PEM
+ // entries are the certificate chain issued by the signer (typically,
+ // signers will return their certificate chain in leaf-to-root order).
+ //
+ // Prefer using the credential bundle format, since your application code
+ // can read it atomically. If you use keyPath and certificateChainPath,
+ // your application must make two separate file reads. If these coincide
+ // with a certificate rotation, it is possible that the private key and leaf
+ // certificate you read may not correspond to each other. Your application
+ // will need to check for this condition, and re-read until they are
+ // consistent.
+ //
+ // The named signer chooses the format of the certificate it
+ // issues; consult the signer implementation's documentation to learn how to
+ // use the certificates it issues.
+ //
+ // +featureGate=PodCertificateProjection
+ // +optional
+ optional PodCertificateProjection podCertificate = 6;
}
// VolumeResourceRequirements describes the storage resource requirements for a volume.
@@ -6753,13 +6913,12 @@ message VolumeSource {
// iscsi represents an ISCSI Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
- // More info: https://examples.k8s.io/volumes/iscsi/README.md
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi
// +optional
optional ISCSIVolumeSource iscsi = 8;
// glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
// Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
- // More info: https://examples.k8s.io/volumes/glusterfs/README.md
// +optional
optional GlusterfsVolumeSource glusterfs = 9;
@@ -6771,7 +6930,6 @@ message VolumeSource {
// rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
// Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
- // More info: https://examples.k8s.io/volumes/rbd/README.md
// +optional
optional RBDVolumeSource rbd = 11;
diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go
index f7641e485..e6b8f9daa 100644
--- a/vendor/k8s.io/api/core/v1/types.go
+++ b/vendor/k8s.io/api/core/v1/types.go
@@ -91,12 +91,11 @@ type VolumeSource struct {
NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,7,opt,name=nfs"`
// iscsi represents an ISCSI Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
- // More info: https://examples.k8s.io/volumes/iscsi/README.md
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi
// +optional
ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"`
// glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
// Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
- // More info: https://examples.k8s.io/volumes/glusterfs/README.md
// +optional
Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"`
// persistentVolumeClaimVolumeSource represents a reference to a
@@ -106,7 +105,6 @@ type VolumeSource struct {
PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"`
// rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
// Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
- // More info: https://examples.k8s.io/volumes/rbd/README.md
// +optional
RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"`
// flexVolume represents a generic volume resource that is
@@ -437,7 +435,6 @@ type PersistentVolumeSpec struct {
// after a volume has been updated successfully to a new class.
// For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound
// PersistentVolumeClaims during the binding process.
- // This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
// +featureGate=VolumeAttributesClass
// +optional
VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,10,opt,name=volumeAttributesClassName"`
@@ -616,15 +613,13 @@ type PersistentVolumeClaimSpec struct {
// volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
// If specified, the CSI driver will create or update the volume with the attributes defined
// in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
- // it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
- // will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
- // If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
- // will be set by the persistentvolume controller if it exists.
+ // it can be changed after the claim is created. An empty string or nil value indicates that no
+ // VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state,
+ // this field can be reset to its previous value (including nil) to cancel the modification.
// If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
// set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
// exists.
// More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
- // (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
// +featureGate=VolumeAttributesClass
// +optional
VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,9,opt,name=volumeAttributesClassName"`
@@ -851,13 +846,11 @@ type PersistentVolumeClaimStatus struct {
AllocatedResourceStatuses map[ResourceName]ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty" protobuf:"bytes,7,rep,name=allocatedResourceStatuses"`
// currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using.
// When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim
- // This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
// +featureGate=VolumeAttributesClass
// +optional
CurrentVolumeAttributesClassName *string `json:"currentVolumeAttributesClassName,omitempty" protobuf:"bytes,8,opt,name=currentVolumeAttributesClassName"`
// ModifyVolumeStatus represents the status object of ControllerModifyVolume operation.
// When this is unset, there is no ModifyVolume operation being attempted.
- // This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
// +featureGate=VolumeAttributesClass
// +optional
ModifyVolumeStatus *ModifyVolumeStatus `json:"modifyVolumeStatus,omitempty" protobuf:"bytes,9,opt,name=modifyVolumeStatus"`
@@ -972,7 +965,6 @@ type EmptyDirVolumeSource struct {
// Glusterfs volumes do not support ownership management or SELinux relabeling.
type GlusterfsVolumeSource struct {
// endpoints is the endpoint name that details Glusterfs topology.
- // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"`
// path is the Glusterfs volume path.
@@ -1993,6 +1985,79 @@ type ClusterTrustBundleProjection struct {
Path string `json:"path" protobuf:"bytes,4,rep,name=path"`
}
+// PodCertificateProjection provides a private key and X.509 certificate in the
+// pod filesystem.
+type PodCertificateProjection struct {
+ // Kubelet's generated CSRs will be addressed to this signer.
+ //
+ // +required
+ SignerName string `json:"signerName,omitempty" protobuf:"bytes,1,rep,name=signerName"`
+
+ // The type of keypair Kubelet will generate for the pod.
+ //
+ // Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384",
+ // "ECDSAP521", and "ED25519".
+ //
+ // +required
+ KeyType string `json:"keyType,omitempty" protobuf:"bytes,2,rep,name=keyType"`
+
+ // maxExpirationSeconds is the maximum lifetime permitted for the
+ // certificate.
+ //
+ // Kubelet copies this value verbatim into the PodCertificateRequests it
+ // generates for this projection.
+ //
+ // If omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver
+ // will reject values shorter than 3600 (1 hour). The maximum allowable
+ // value is 7862400 (91 days).
+ //
+ // The signer implementation is then free to issue a certificate with any
+ // lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
+ // seconds (1 hour). This constraint is enforced by kube-apiserver.
+ // `kubernetes.io` signers will never issue certificates with a lifetime
+ // longer than 24 hours.
+ //
+ // +optional
+ MaxExpirationSeconds *int32 `json:"maxExpirationSeconds,omitempty" protobuf:"varint,3,opt,name=maxExpirationSeconds"`
+
+ // Write the credential bundle at this path in the projected volume.
+ //
+ // The credential bundle is a single file that contains multiple PEM blocks.
+ // The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private
+ // key.
+ //
+ // The remaining blocks are CERTIFICATE blocks, containing the issued
+ // certificate chain from the signer (leaf and any intermediates).
+ //
+ // Using credentialBundlePath lets your Pod's application code make a single
+ // atomic read that retrieves a consistent key and certificate chain. If you
+ // project them to separate files, your application code will need to
+ // additionally check that the leaf certificate was issued to the key.
+ //
+ // +optional
+ CredentialBundlePath string `json:"credentialBundlePath,omitempty" protobuf:"bytes,4,rep,name=credentialBundlePath"`
+
+ // Write the key at this path in the projected volume.
+ //
+ // Most applications should use credentialBundlePath. When using keyPath
+ // and certificateChainPath, your application needs to check that the key
+ // and leaf certificate are consistent, because it is possible to read the
+ // files mid-rotation.
+ //
+ // +optional
+ KeyPath string `json:"keyPath,omitempty" protobuf:"bytes,5,rep,name=keyPath"`
+
+ // Write the certificate chain at this path in the projected volume.
+ //
+ // Most applications should use credentialBundlePath. When using keyPath
+ // and certificateChainPath, your application needs to check that the key
+ // and leaf certificate are consistent, because it is possible to read the
+ // files mid-rotation.
+ //
+ // +optional
+ CertificateChainPath string `json:"certificateChainPath,omitempty" protobuf:"bytes,6,rep,name=certificateChainPath"`
+}
+
// Represents a projected volume source
type ProjectedVolumeSource struct {
// sources is the list of volume projections. Each entry in this list
@@ -2043,6 +2108,44 @@ type VolumeProjection struct {
// +featureGate=ClusterTrustBundleProjection
// +optional
ClusterTrustBundle *ClusterTrustBundleProjection `json:"clusterTrustBundle,omitempty" protobuf:"bytes,5,opt,name=clusterTrustBundle"`
+
+ // Projects an auto-rotating credential bundle (private key and certificate
+ // chain) that the pod can use either as a TLS client or server.
+ //
+ // Kubelet generates a private key and uses it to send a
+ // PodCertificateRequest to the named signer. Once the signer approves the
+ // request and issues a certificate chain, Kubelet writes the key and
+ // certificate chain to the pod filesystem. The pod does not start until
+ // certificates have been issued for each podCertificate projected volume
+ // source in its spec.
+ //
+ // Kubelet will begin trying to rotate the certificate at the time indicated
+ // by the signer using the PodCertificateRequest.Status.BeginRefreshAt
+ // timestamp.
+ //
+ // Kubelet can write a single file, indicated by the credentialBundlePath
+ // field, or separate files, indicated by the keyPath and
+ // certificateChainPath fields.
+ //
+ // The credential bundle is a single file in PEM format. The first PEM
+ // entry is the private key (in PKCS#8 format), and the remaining PEM
+ // entries are the certificate chain issued by the signer (typically,
+ // signers will return their certificate chain in leaf-to-root order).
+ //
+ // Prefer using the credential bundle format, since your application code
+ // can read it atomically. If you use keyPath and certificateChainPath,
+ // your application must make two separate file reads. If these coincide
+ // with a certificate rotation, it is possible that the private key and leaf
+ // certificate you read may not correspond to each other. Your application
+ // will need to check for this condition, and re-read until they are
+ // consistent.
+ //
+ // The named signer chooses the format of the certificate it
+ // issues; consult the signer implementation's documentation to learn how to
+ // use the certificates it issues.
+ //
+ // +featureGate=PodCertificateProjection
+ // +optional
+ PodCertificate *PodCertificateProjection `json:"podCertificate,omitempty" protobuf:"bytes,6,opt,name=podCertificate"`
}
const (
@@ -2351,7 +2454,8 @@ type VolumeDevice struct {
// EnvVar represents an environment variable present in a Container.
type EnvVar struct {
- // Name of the environment variable. Must be a C_IDENTIFIER.
+ // Name of the environment variable.
+ // May consist of any printable ASCII characters except '='.
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
// Optional: no more than one of the following may be specified.
@@ -2388,6 +2492,39 @@ type EnvVarSource struct {
// Selects a key of a secret in the pod's namespace
// +optional
SecretKeyRef *SecretKeySelector `json:"secretKeyRef,omitempty" protobuf:"bytes,4,opt,name=secretKeyRef"`
+ // FileKeyRef selects a key of the env file.
+ // Requires the EnvFiles feature gate to be enabled.
+ //
+ // +featureGate=EnvFiles
+ // +optional
+ FileKeyRef *FileKeySelector `json:"fileKeyRef,omitempty" protobuf:"bytes,5,opt,name=fileKeyRef"`
+}
+
+// FileKeySelector selects a key of the env file.
+// +structType=atomic
+type FileKeySelector struct {
+ // The name of the volume mount containing the env file.
+ // +required
+ VolumeName string `json:"volumeName" protobuf:"bytes,1,opt,name=volumeName"`
+ // The path within the volume from which to select the file.
+ // Must be relative and may not contain the '..' path or start with '..'.
+ // +required
+ Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
+ // The key within the env file. An invalid key will prevent the pod from starting.
+ // The keys defined within a source may consist of any printable ASCII characters except '='.
+ // During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
+ // +required
+ Key string `json:"key" protobuf:"bytes,3,opt,name=key"`
+ // Specify whether the file or its key must be defined. If the file or key
+ // does not exist, then the env var is not published.
+ // If optional is set to true and the specified key does not exist,
+ // the environment variable will not be set in the Pod's containers.
+ //
+ // If optional is set to false and the specified key does not exist,
+ // an error will be returned during Pod creation.
+ // +optional
+ // +default=false
+ Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
// ObjectFieldSelector selects an APIVersioned field of an object.
@@ -2439,7 +2576,8 @@ type SecretKeySelector struct {
// EnvFromSource represents the source of a set of ConfigMaps or Secrets
type EnvFromSource struct {
- // Optional text to prepend to the name of each environment variable. Must be a C_IDENTIFIER.
+ // Optional text to prepend to the name of each environment variable.
+ // May consist of any printable ASCII characters except '='.
// +optional
Prefix string `json:"prefix,omitempty" protobuf:"bytes,1,opt,name=prefix"`
// The ConfigMap to select from
@@ -2805,8 +2943,8 @@ type Container struct {
// +listMapKey=protocol
Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"`
// List of sources to populate environment variables in the container.
- // The keys defined within a source must be a C_IDENTIFIER. All invalid keys
- // will be reported as an event when the container is starting. When a key exists in multiple
+ // The keys defined within a source may consist of any printable ASCII characters except '='.
+ // When a key exists in multiple
// sources, the value associated with the last source will take precedence.
// Values defined by an Env with a duplicate key will take precedence.
// Cannot be updated.
@@ -3678,8 +3816,8 @@ type PodAntiAffinity struct {
// most preferred is the one with the greatest sum of weights, i.e.
// for each node that meets all of the scheduling requirements (resource
// request, requiredDuringScheduling anti-affinity expressions, etc.),
- // compute a sum by iterating through the elements of this field and adding
- // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ // compute a sum by iterating through the elements of this field and subtracting
+ // "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the
// node(s) with the highest sum are the most preferred.
// +optional
// +listType=atomic
@@ -3806,7 +3944,6 @@ type Taint struct {
// Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
Effect TaintEffect `json:"effect" protobuf:"bytes,3,opt,name=effect,casttype=TaintEffect"`
// TimeAdded represents the time at which the taint was added.
- // It is only written for NoExecute taints.
// +optional
TimeAdded *metav1.Time `json:"timeAdded,omitempty" protobuf:"bytes,4,opt,name=timeAdded"`
}
@@ -3983,7 +4120,9 @@ type PodSpec struct {
// +optional
NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"`
// Host networking requested for this pod. Use the host's network namespace.
- // If this option is set, the ports that will be used must be specified.
+ // When using HostNetwork you should specify ports so the scheduler is aware.
+ // When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`,
+ // and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`.
// Default to false.
// +k8s:conversion-gen=false
// +optional
@@ -4206,6 +4345,20 @@ type PodSpec struct {
// +featureGate=PodLevelResources
// +optional
Resources *ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,40,opt,name=resources"`
+ // HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod.
+ // This field only specifies the pod's hostname and does not affect its DNS records.
+ // When this field is set to a non-empty string:
+ // - It takes precedence over the values set in `hostname` and `subdomain`.
+ // - The Pod's hostname will be set to this value.
+ // - `setHostnameAsFQDN` must be nil or set to false.
+ // - `hostNetwork` must be set to false.
+ //
+ // This field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters.
+ // Requires the HostnameOverride feature gate to be enabled.
+ //
+ // +featureGate=HostnameOverride
+ // +optional
+ HostnameOverride *string `json:"hostnameOverride,omitempty" protobuf:"bytes,41,opt,name=hostnameOverride"`
}
// PodResourceClaim references exactly one ResourceClaim, either directly
@@ -4799,8 +4952,8 @@ type EphemeralContainerCommon struct {
// +listMapKey=protocol
Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"`
// List of sources to populate environment variables in the container.
- // The keys defined within a source must be a C_IDENTIFIER. All invalid keys
- // will be reported as an event when the container is starting. When a key exists in multiple
+ // The keys defined within a source may consist of any printable ASCII characters except '='.
+ // When a key exists in multiple
// sources, the value associated with the last source will take precedence.
// Values defined by an Env with a duplicate key will take precedence.
// Cannot be updated.
@@ -5311,6 +5464,7 @@ type ReplicationControllerCondition struct {
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.0
+// +k8s:supportsSubresource=/scale
// ReplicationController represents the configuration of a replication controller.
type ReplicationController struct {
diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
index 9e987eefd..ef295cf70 100644
--- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
@@ -356,7 +356,7 @@ var map_Container = map[string]string{
"args": "Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
"workingDir": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.",
"ports": "List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.",
- "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
+ "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source may consist of any printable ASCII characters except '='. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
"env": "List of environment variables to set in the container. Cannot be updated.",
"resources": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/",
"resizePolicy": "Resources resize policy for the container.",
@@ -597,7 +597,7 @@ func (EndpointsList) SwaggerDoc() map[string]string {
var map_EnvFromSource = map[string]string{
"": "EnvFromSource represents the source of a set of ConfigMaps or Secrets",
- "prefix": "Optional text to prepend to the name of each environment variable. Must be a C_IDENTIFIER.",
+ "prefix": "Optional text to prepend to the name of each environment variable. May consist of any printable ASCII characters except '='.",
"configMapRef": "The ConfigMap to select from",
"secretRef": "The Secret to select from",
}
@@ -608,7 +608,7 @@ func (EnvFromSource) SwaggerDoc() map[string]string {
var map_EnvVar = map[string]string{
"": "EnvVar represents an environment variable present in a Container.",
- "name": "Name of the environment variable. Must be a C_IDENTIFIER.",
+ "name": "Name of the environment variable. May consist of any printable ASCII characters except '='.",
"value": "Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".",
"valueFrom": "Source for the environment variable's value. Cannot be used if value is not empty.",
}
@@ -623,6 +623,7 @@ var map_EnvVarSource = map[string]string{
"resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.",
"configMapKeyRef": "Selects a key of a ConfigMap.",
"secretKeyRef": "Selects a key of a secret in the pod's namespace",
+ "fileKeyRef": "FileKeyRef selects a key of the env file. Requires the EnvFiles feature gate to be enabled.",
}
func (EnvVarSource) SwaggerDoc() map[string]string {
@@ -646,7 +647,7 @@ var map_EphemeralContainerCommon = map[string]string{
"args": "Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
"workingDir": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.",
"ports": "Ports are not allowed for ephemeral containers.",
- "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
+ "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source may consist of any printable ASCII characters except '='. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
"env": "List of environment variables to set in the container. Cannot be updated.",
"resources": "Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.",
"resizePolicy": "Resources resize policy for the container.",
@@ -754,6 +755,18 @@ func (FCVolumeSource) SwaggerDoc() map[string]string {
return map_FCVolumeSource
}
+var map_FileKeySelector = map[string]string{
+ "": "FileKeySelector selects a key of the env file.",
+ "volumeName": "The name of the volume mount containing the env file.",
+ "path": "The path within the volume from which to select the file. Must be relative and may not contain the '..' path or start with '..'.",
+ "key": "The key within the env file. An invalid key will prevent the pod from starting. The keys defined within a source may consist of any printable ASCII characters except '='. During the Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.",
+ "optional": "Specify whether the file or its key must be defined. If the file or key does not exist, then the env var is not published. If optional is set to true and the specified key does not exist, the environment variable will not be set in the Pod's containers.\n\nIf optional is set to false and the specified key does not exist, an error will be returned during Pod creation.",
+}
+
+func (FileKeySelector) SwaggerDoc() map[string]string {
+ return map_FileKeySelector
+}
+
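To make the new selector concrete, here is a hedged Go sketch of wiring an environment variable to a key in an env file; the Go field names (VolumeName, Path, Key, Optional) are inferred from the JSON keys documented above, and the EnvFiles feature gate is assumed to be enabled:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	optional := false
	// Populate DB_PASSWORD from the "DB_PASSWORD" key of an env file that a
	// volume mount named "config" provides at the relative path "app.env".
	envVar := corev1.EnvVar{
		Name: "DB_PASSWORD",
		ValueFrom: &corev1.EnvVarSource{
			FileKeyRef: &corev1.FileKeySelector{
				VolumeName: "config",
				Path:       "app.env",
				Key:        "DB_PASSWORD",
				Optional:   &optional, // pod creation fails if the key is missing
			},
		},
	}
	fmt.Printf("%+v\n", envVar)
}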
var map_FlexPersistentVolumeSource = map[string]string{
"": "FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin.",
"driver": "driver is the name of the driver to use for this volume.",
@@ -837,7 +850,7 @@ func (GlusterfsPersistentVolumeSource) SwaggerDoc() map[string]string {
var map_GlusterfsVolumeSource = map[string]string{
"": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.",
- "endpoints": "endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod",
+ "endpoints": "endpoints is the endpoint name that details Glusterfs topology.",
"path": "path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod",
"readOnly": "readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod",
}
@@ -1446,7 +1459,7 @@ var map_PersistentVolumeClaimSpec = map[string]string{
"volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.",
"dataSource": "dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.",
"dataSourceRef": "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.",
- "volumeAttributesClassName": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).",
+ "volumeAttributesClassName": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName; it can be changed after the claim is created. An empty string or nil value indicates that no VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/",
}
func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string {
@@ -1461,8 +1474,8 @@ var map_PersistentVolumeClaimStatus = map[string]string{
"conditions": "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'Resizing'.",
"allocatedResources": "allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\n\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.",
"allocatedResourceStatuses": "allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nClaimResourceStatus can be in any of following states:\n\t- ControllerResizeInProgress:\n\t\tState set when resize controller starts resizing the volume in control-plane.\n\t- ControllerResizeFailed:\n\t\tState set when resize has failed in resize controller with a terminal error.\n\t- NodeResizePending:\n\t\tState set when resize controller has finished resizing the volume but further resizing of\n\t\tvolume is needed on the node.\n\t- NodeResizeInProgress:\n\t\tState set when kubelet starts resizing the volume.\n\t- NodeResizeFailed:\n\t\tState set when resizing has failed in kubelet with a terminal error. Transient errors don't set\n\t\tNodeResizeFailed.\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\n\t- pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeFailed\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizePending\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeFailed\"\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\n\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.",
- "currentVolumeAttributesClassName": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim This is a beta field and requires enabling VolumeAttributesClass feature (off by default).",
- "modifyVolumeStatus": "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted. This is a beta field and requires enabling VolumeAttributesClass feature (off by default).",
+ "currentVolumeAttributesClassName": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributesClass applied to this PersistentVolumeClaim.",
+ "modifyVolumeStatus": "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted.",
}
func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string {
@@ -1539,7 +1552,7 @@ var map_PersistentVolumeSpec = map[string]string{
"mountOptions": "mountOptions is the list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options",
"volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec.",
"nodeAffinity": "nodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.",
- "volumeAttributesClassName": "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is a beta field and requires enabling VolumeAttributesClass feature (off by default).",
+ "volumeAttributesClassName": "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process.",
}
func (PersistentVolumeSpec) SwaggerDoc() map[string]string {
@@ -1606,7 +1619,7 @@ func (PodAffinityTerm) SwaggerDoc() map[string]string {
var map_PodAntiAffinity = map[string]string{
"": "Pod anti affinity is a group of inter pod anti affinity scheduling rules.",
"requiredDuringSchedulingIgnoredDuringExecution": "If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.",
- "preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.",
+ "preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and subtracting \"weight\" from the sum if the node has pods which match the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.",
}
func (PodAntiAffinity) SwaggerDoc() map[string]string {
@@ -1626,6 +1639,20 @@ func (PodAttachOptions) SwaggerDoc() map[string]string {
return map_PodAttachOptions
}
+var map_PodCertificateProjection = map[string]string{
+ "": "PodCertificateProjection provides a private key and X.509 certificate in the pod filesystem.",
+ "signerName": "Kubelet's generated CSRs will be addressed to this signer.",
+ "keyType": "The type of keypair Kubelet will generate for the pod.\n\nValid values are \"RSA3072\", \"RSA4096\", \"ECDSAP256\", \"ECDSAP384\", \"ECDSAP521\", and \"ED25519\".",
+ "maxExpirationSeconds": "maxExpirationSeconds is the maximum lifetime permitted for the certificate.\n\nKubelet copies this value verbatim into the PodCertificateRequests it generates for this projection.\n\nIf omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver will reject values shorter than 3600 (1 hour). The maximum allowable value is 7862400 (91 days).\n\nThe signer implementation is then free to issue a certificate with any lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 seconds (1 hour). This constraint is enforced by kube-apiserver. `kubernetes.io` signers will never issue certificates with a lifetime longer than 24 hours.",
+ "credentialBundlePath": "Write the credential bundle at this path in the projected volume.\n\nThe credential bundle is a single file that contains multiple PEM blocks. The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private key.\n\nThe remaining blocks are CERTIFICATE blocks, containing the issued certificate chain from the signer (leaf and any intermediates).\n\nUsing credentialBundlePath lets your Pod's application code make a single atomic read that retrieves a consistent key and certificate chain. If you project them to separate files, your application code will need to additionally check that the leaf certificate was issued to the key.",
+ "keyPath": "Write the key at this path in the projected volume.\n\nMost applications should use credentialBundlePath. When using keyPath and certificateChainPath, your application needs to check that the key and leaf certificate are consistent, because it is possible to read the files mid-rotation.",
+ "certificateChainPath": "Write the certificate chain at this path in the projected volume.\n\nMost applications should use credentialBundlePath. When using keyPath and certificateChainPath, your application needs to check that the key and leaf certificate are consistent, because it is possible to read the files mid-rotation.",
+}
+
+func (PodCertificateProjection) SwaggerDoc() map[string]string {
+ return map_PodCertificateProjection
+}
+
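A hedged Go sketch of the projection described above; the field names (SignerName, KeyType, CredentialBundlePath) are inferred from the JSON keys in this map, the signer name is hypothetical, and the feature is assumed to be available in the target cluster:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	maxExpiration := int32(86400) // 24 hours, the default applied by kube-apiserver
	// Project an auto-rotating private key and certificate chain as a single
	// credential bundle file that the application can read atomically.
	proj := corev1.VolumeProjection{
		PodCertificate: &corev1.PodCertificateProjection{
			SignerName:           "example.com/my-signer", // hypothetical signer
			KeyType:              "ECDSAP256",
			MaxExpirationSeconds: &maxExpiration,
			CredentialBundlePath: "credbundle.pem",
		},
	}
	fmt.Printf("%+v\n", proj)
}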
var map_PodCondition = map[string]string{
"": "PodCondition contains details for the current condition of this pod.",
"type": "Type is the type of the condition. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
@@ -1824,7 +1851,7 @@ var map_PodSpec = map[string]string{
"serviceAccount": "DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.",
"automountServiceAccountToken": "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.",
"nodeName": "NodeName indicates in which node this pod is scheduled. If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. This field should not be used to express a desire for the pod to be scheduled on a specific node. https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename",
- "hostNetwork": "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.",
+ "hostNetwork": "Host networking requested for this pod. Use the host's network namespace. When using HostNetwork you should specify ports so the scheduler is aware. When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`, and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`. Default to false.",
"hostPID": "Use the host's pid namespace. Optional: Default to false.",
"hostIPC": "Use the host's ipc namespace. Optional: Default to false.",
"shareProcessNamespace": "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false.",
@@ -1851,6 +1878,7 @@ var map_PodSpec = map[string]string{
"schedulingGates": "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.",
"resourceClaims": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable.",
"resources": "Resources is the total amount of CPU and Memory resources required by all containers in the pod. It supports specifying Requests and Limits for \"cpu\" and \"memory\" resource names only. ResourceClaims are not supported.\n\nThis field enables fine-grained control over resource allocation for the entire pod, allowing resource sharing among containers in a pod.\n\nThis is an alpha field and requires enabling the PodLevelResources feature gate.",
+ "hostnameOverride": "HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod. This field only specifies the pod's hostname and does not affect its DNS records. When this field is set to a non-empty string: - It takes precedence over the values set in `hostname` and `subdomain`. - The Pod's hostname will be set to this value. - `setHostnameAsFQDN` must be nil or set to false. - `hostNetwork` must be set to false.\n\nThis field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters. Requires the HostnameOverride feature gate to be enabled.",
}
func (PodSpec) SwaggerDoc() map[string]string {
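For illustration only, a minimal Go sketch of the new hostnameOverride field described above, assuming the HostnameOverride feature gate is enabled; per the documented contract, hostNetwork stays false and setHostnameAsFQDN stays nil:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	hostname := "data-shard-01" // must be a valid RFC 1123 DNS subdomain, at most 64 characters
	spec := corev1.PodSpec{
		HostnameOverride: &hostname, // takes precedence over hostname and subdomain
		HostNetwork:      false,     // hostNetwork must be false when hostnameOverride is set
		// SetHostnameAsFQDN is left nil, as the field's documentation requires.
	}
	fmt.Println(*spec.HostnameOverride)
}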
@@ -2587,7 +2615,7 @@ var map_Taint = map[string]string{
"key": "Required. The taint key to be applied to a node.",
"value": "The taint value corresponding to the taint key.",
"effect": "Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute.",
- "timeAdded": "TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints.",
+ "timeAdded": "TimeAdded represents the time at which the taint was added.",
}
func (Taint) SwaggerDoc() map[string]string {
@@ -2727,6 +2755,7 @@ var map_VolumeProjection = map[string]string{
"configMap": "configMap information about the configMap data to project",
"serviceAccountToken": "serviceAccountToken is information about the serviceAccountToken data to project",
"clusterTrustBundle": "ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field of ClusterTrustBundle objects in an auto-updating file.\n\nAlpha, gated by the ClusterTrustBundleProjection feature gate.\n\nClusterTrustBundle objects can either be selected by name, or by the combination of signer name and a label selector.\n\nKubelet performs aggressive normalization of the PEM contents written into the pod filesystem. Esoteric PEM features such as inter-block comments and block headers are stripped. Certificates are deduplicated. The ordering of certificates within the file is arbitrary, and Kubelet may change the order over time.",
+ "podCertificate": "Projects an auto-rotating credential bundle (private key and certificate chain) that the pod can use either as a TLS client or server.\n\nKubelet generates a private key and uses it to send a PodCertificateRequest to the named signer. Once the signer approves the request and issues a certificate chain, Kubelet writes the key and certificate chain to the pod filesystem. The pod does not start until certificates have been issued for each podCertificate projected volume source in its spec.\n\nKubelet will begin trying to rotate the certificate at the time indicated by the signer using the PodCertificateRequest.Status.BeginRefreshAt timestamp.\n\nKubelet can write a single file, indicated by the credentialBundlePath field, or separate files, indicated by the keyPath and certificateChainPath fields.\n\nThe credential bundle is a single file in PEM format. The first PEM entry is the private key (in PKCS#8 format), and the remaining PEM entries are the certificate chain issued by the signer (typically, signers will return their certificate chain in leaf-to-root order).\n\nPrefer using the credential bundle format, since your application code can read it atomically. If you use keyPath and certificateChainPath, your application must make two separate file reads. If these coincide with a certificate rotation, it is possible that the private key and leaf certificate you read may not correspond to each other. Your application will need to check for this condition, and re-read until they are consistent.\n\nThe named signer chooses the format of the certificate it issues; consult the signer implementation's documentation to learn how to use the certificates it issues.",
}
func (VolumeProjection) SwaggerDoc() map[string]string {
@@ -2752,10 +2781,10 @@ var map_VolumeSource = map[string]string{
"gitRepo": "gitRepo represents a git repository at a particular revision. Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.",
"secret": "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret",
"nfs": "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
- "iscsi": "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md",
- "glusterfs": "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. More info: https://examples.k8s.io/volumes/glusterfs/README.md",
+ "iscsi": "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi",
+ "glusterfs": "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.",
"persistentVolumeClaim": "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
- "rbd": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. More info: https://examples.k8s.io/volumes/rbd/README.md",
+ "rbd": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.",
"flexVolume": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.",
"cinder": "cinder represents a cinder volume attached and mounted on kubelets host machine. Deprecated: Cinder is deprecated. All operations for the in-tree cinder type are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
"cephfs": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.",
diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
index 619c52542..3c65e07c9 100644
--- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
@@ -1433,6 +1433,11 @@ func (in *EnvVarSource) DeepCopyInto(out *EnvVarSource) {
*out = new(SecretKeySelector)
(*in).DeepCopyInto(*out)
}
+ if in.FileKeyRef != nil {
+ in, out := &in.FileKeyRef, &out.FileKeyRef
+ *out = new(FileKeySelector)
+ (*in).DeepCopyInto(*out)
+ }
return
}
@@ -1736,6 +1741,27 @@ func (in *FCVolumeSource) DeepCopy() *FCVolumeSource {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FileKeySelector) DeepCopyInto(out *FileKeySelector) {
+ *out = *in
+ if in.Optional != nil {
+ in, out := &in.Optional, &out.Optional
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileKeySelector.
+func (in *FileKeySelector) DeepCopy() *FileKeySelector {
+ if in == nil {
+ return nil
+ }
+ out := new(FileKeySelector)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FlexPersistentVolumeSource) DeepCopyInto(out *FlexPersistentVolumeSource) {
*out = *in
@@ -3797,6 +3823,27 @@ func (in *PodAttachOptions) DeepCopyObject() runtime.Object {
return nil
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodCertificateProjection) DeepCopyInto(out *PodCertificateProjection) {
+ *out = *in
+ if in.MaxExpirationSeconds != nil {
+ in, out := &in.MaxExpirationSeconds, &out.MaxExpirationSeconds
+ *out = new(int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateProjection.
+func (in *PodCertificateProjection) DeepCopy() *PodCertificateProjection {
+ if in == nil {
+ return nil
+ }
+ out := new(PodCertificateProjection)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodCondition) DeepCopyInto(out *PodCondition) {
*out = *in
@@ -4412,6 +4459,11 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) {
*out = new(ResourceRequirements)
(*in).DeepCopyInto(*out)
}
+ if in.HostnameOverride != nil {
+ in, out := &in.HostnameOverride, &out.HostnameOverride
+ *out = new(string)
+ **out = **in
+ }
return
}
@@ -6412,6 +6464,11 @@ func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) {
*out = new(ClusterTrustBundleProjection)
(*in).DeepCopyInto(*out)
}
+ if in.PodCertificate != nil {
+ in, out := &in.PodCertificate, &out.PodCertificate
+ *out = new(PodCertificateProjection)
+ (*in).DeepCopyInto(*out)
+ }
return
}
diff --git a/vendor/k8s.io/api/extensions/v1beta1/doc.go b/vendor/k8s.io/api/extensions/v1beta1/doc.go
index 7770fab5d..be710973c 100644
--- a/vendor/k8s.io/api/extensions/v1beta1/doc.go
+++ b/vendor/k8s.io/api/extensions/v1beta1/doc.go
@@ -18,5 +18,7 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:validation-gen=TypeMeta
+// +k8s:validation-gen-input=k8s.io/api/extensions/v1beta1
package v1beta1
diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/vendor/k8s.io/api/extensions/v1beta1/generated.proto
index 70fcec0cc..fed0b4835 100644
--- a/vendor/k8s.io/api/extensions/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/extensions/v1beta1/generated.proto
@@ -980,7 +980,7 @@ message RollingUpdateDaemonSet {
// pod is available (Ready for at least minReadySeconds) the old DaemonSet pod
// on that node is marked deleted. If the old pod becomes unavailable for any
// reason (Ready transitions to false, is evicted, or is drained) an updated
- // pod is immediatedly created on that node without considering surge limits.
+ // pod is immediately created on that node without considering surge limits.
// Allowing surge implies the possibility that the resources consumed by the
// daemonset on any given node can double if the readiness check fails, and
// so resource intensive daemonsets should take into account that they may
@@ -1039,6 +1039,9 @@ message Scale {
message ScaleSpec {
// desired number of instances for the scaled object.
// +optional
+ // +k8s:optional
+ // +default=0
+ // +k8s:minimum=0
optional int32 replicas = 1;
}
diff --git a/vendor/k8s.io/api/extensions/v1beta1/types.go b/vendor/k8s.io/api/extensions/v1beta1/types.go
index b80a7a7e1..c7b50e059 100644
--- a/vendor/k8s.io/api/extensions/v1beta1/types.go
+++ b/vendor/k8s.io/api/extensions/v1beta1/types.go
@@ -27,6 +27,9 @@ import (
type ScaleSpec struct {
// desired number of instances for the scaled object.
// +optional
+ // +k8s:optional
+ // +default=0
+ // +k8s:minimum=0
Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
}
@@ -54,6 +57,7 @@ type ScaleStatus struct {
// +k8s:prerelease-lifecycle-gen:introduced=1.1
// +k8s:prerelease-lifecycle-gen:deprecated=1.2
// +k8s:prerelease-lifecycle-gen:removed=1.16
+// +k8s:isSubresource=/scale
// represents a scaling request for a resource.
type Scale struct {
@@ -398,7 +402,7 @@ type RollingUpdateDaemonSet struct {
// pod is available (Ready for at least minReadySeconds) the old DaemonSet pod
// on that node is marked deleted. If the old pod becomes unavailable for any
// reason (Ready transitions to false, is evicted, or is drained) an updated
- // pod is immediatedly created on that node without considering surge limits.
+ // pod is immediately created on that node without considering surge limits.
// Allowing surge implies the possibility that the resources consumed by the
// daemonset on any given node can double if the readiness check fails, and
// so resource intensive daemonsets should take into account that they may
diff --git a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
index 923fab3aa..8a158233e 100644
--- a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
@@ -482,7 +482,7 @@ func (RollbackConfig) SwaggerDoc() map[string]string {
var map_RollingUpdateDaemonSet = map[string]string{
"": "Spec to control the desired behavior of daemon set rolling update.",
"maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.",
- "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is an alpha field and requires enabling DaemonSetUpdateSurge feature gate.",
+ "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediately created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is an alpha field and requires enabling DaemonSetUpdateSurge feature gate.",
}
func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.validations.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.validations.go
new file mode 100644
index 000000000..6d2a1666a
--- /dev/null
+++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.validations.go
@@ -0,0 +1,78 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by validation-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ context "context"
+ fmt "fmt"
+
+ operation "k8s.io/apimachinery/pkg/api/operation"
+ safe "k8s.io/apimachinery/pkg/api/safe"
+ validate "k8s.io/apimachinery/pkg/api/validate"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ field "k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+func init() { localSchemeBuilder.Register(RegisterValidations) }
+
+// RegisterValidations adds validation functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterValidations(scheme *runtime.Scheme) error {
+ scheme.AddValidationFunc((*Scale)(nil), func(ctx context.Context, op operation.Operation, obj, oldObj interface{}) field.ErrorList {
+ switch op.Request.SubresourcePath() {
+ case "/scale":
+ return Validate_Scale(ctx, op, nil /* fldPath */, obj.(*Scale), safe.Cast[*Scale](oldObj))
+ }
+ return field.ErrorList{field.InternalError(nil, fmt.Errorf("no validation found for %T, subresource: %v", obj, op.Request.SubresourcePath()))}
+ })
+ return nil
+}
+
+func Validate_Scale(ctx context.Context, op operation.Operation, fldPath *field.Path, obj, oldObj *Scale) (errs field.ErrorList) {
+ // field Scale.TypeMeta has no validation
+ // field Scale.ObjectMeta has no validation
+
+ // field Scale.Spec
+ errs = append(errs,
+ func(fldPath *field.Path, obj, oldObj *ScaleSpec) (errs field.ErrorList) {
+ errs = append(errs, Validate_ScaleSpec(ctx, op, fldPath, obj, oldObj)...)
+ return
+ }(fldPath.Child("spec"), &obj.Spec, safe.Field(oldObj, func(oldObj *Scale) *ScaleSpec { return &oldObj.Spec }))...)
+
+ // field Scale.Status has no validation
+ return errs
+}
+
+func Validate_ScaleSpec(ctx context.Context, op operation.Operation, fldPath *field.Path, obj, oldObj *ScaleSpec) (errs field.ErrorList) {
+ // field ScaleSpec.Replicas
+ errs = append(errs,
+ func(fldPath *field.Path, obj, oldObj *int32) (errs field.ErrorList) {
+ // optional value-type fields with zero-value defaults are purely documentation
+ if op.Type == operation.Update && (obj == oldObj || (obj != nil && oldObj != nil && *obj == *oldObj)) {
+ return nil // no changes
+ }
+ errs = append(errs, validate.Minimum(ctx, op, fldPath, obj, oldObj, 0)...)
+ return
+ }(fldPath.Child("replicas"), &obj.Replicas, safe.Field(oldObj, func(oldObj *ScaleSpec) *int32 { return &oldObj.Replicas }))...)
+
+ return errs
+}
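To show how the generated validator above behaves, here is a hedged Go sketch that calls Validate_ScaleSpec directly; it assumes the operation package exposes a Create constant alongside the Update constant used above:

package main

import (
	"context"
	"fmt"

	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
	"k8s.io/apimachinery/pkg/api/operation"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	op := operation.Operation{Type: operation.Create} // assumption: a Create constant exists next to Update
	spec := &extensionsv1beta1.ScaleSpec{Replicas: -1}
	// The +k8s:minimum=0 tag on replicas is enforced via validate.Minimum inside
	// Validate_ScaleSpec, so a negative value should yield a field error.
	errs := extensionsv1beta1.Validate_ScaleSpec(context.Background(), op, field.NewPath("spec"), spec, nil)
	fmt.Println(errs)
}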
diff --git a/vendor/k8s.io/api/networking/v1/generated.proto b/vendor/k8s.io/api/networking/v1/generated.proto
index e3e3e9215..16a2792aa 100644
--- a/vendor/k8s.io/api/networking/v1/generated.proto
+++ b/vendor/k8s.io/api/networking/v1/generated.proto
@@ -534,11 +534,12 @@ message NetworkPolicyPort {
// NetworkPolicySpec provides the specification of a NetworkPolicy
message NetworkPolicySpec {
// podSelector selects the pods to which this NetworkPolicy object applies.
- // The array of ingress rules is applied to any pods selected by this field.
+ // The array of rules is applied to any pods selected by this field. An empty
+ // selector matches all pods in the policy's namespace.
// Multiple network policies can select the same set of pods. In this case,
// the ingress rules for each are combined additively.
- // This field is NOT optional and follows standard label selector semantics.
- // An empty podSelector matches all pods in this namespace.
+ // This field is optional. If it is not specified, it defaults to an empty selector.
+ // +optional
optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1;
// ingress is a list of ingress rules to be applied to the selected pods.
diff --git a/vendor/k8s.io/api/networking/v1/types.go b/vendor/k8s.io/api/networking/v1/types.go
index 216647cee..7d9a4fc94 100644
--- a/vendor/k8s.io/api/networking/v1/types.go
+++ b/vendor/k8s.io/api/networking/v1/types.go
@@ -60,11 +60,12 @@ const (
// NetworkPolicySpec provides the specification of a NetworkPolicy
type NetworkPolicySpec struct {
// podSelector selects the pods to which this NetworkPolicy object applies.
- // The array of ingress rules is applied to any pods selected by this field.
+ // The array of rules is applied to any pods selected by this field. An empty
+ // selector matches all pods in the policy's namespace.
// Multiple network policies can select the same set of pods. In this case,
// the ingress rules for each are combined additively.
- // This field is NOT optional and follows standard label selector semantics.
- // An empty podSelector matches all pods in this namespace.
+ // This field is optional. If it is not specified, it defaults to an empty selector.
+ // +optional
PodSelector metav1.LabelSelector `json:"podSelector" protobuf:"bytes,1,opt,name=podSelector"`
// ingress is a list of ingress rules to be applied to the selected pods.
diff --git a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go
index 0e294848b..6210bb7a5 100644
--- a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go
@@ -313,7 +313,7 @@ func (NetworkPolicyPort) SwaggerDoc() map[string]string {
var map_NetworkPolicySpec = map[string]string{
"": "NetworkPolicySpec provides the specification of a NetworkPolicy",
- "podSelector": "podSelector selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.",
+ "podSelector": "podSelector selects the pods to which this NetworkPolicy object applies. The array of rules is applied to any pods selected by this field. An empty selector matches all pods in the policy's namespace. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is optional. If it is not specified, it defaults to an empty selector.",
"ingress": "ingress is a list of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)",
"egress": "egress is a list of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8",
"policyTypes": "policyTypes is a list of rule types that the NetworkPolicy relates to. Valid options are [\"Ingress\"], [\"Egress\"], or [\"Ingress\", \"Egress\"]. If this field is not specified, it will default based on the existence of ingress or egress rules; policies that contain an egress section are assumed to affect egress, and all policies (whether or not they contain an ingress section) are assumed to affect ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8",
diff --git a/vendor/k8s.io/api/networking/v1alpha1/doc.go b/vendor/k8s.io/api/networking/v1alpha1/doc.go
deleted file mode 100644
index 55264ae70..000000000
--- a/vendor/k8s.io/api/networking/v1alpha1/doc.go
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
-Copyright 2022 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// +k8s:deepcopy-gen=package
-// +k8s:protobuf-gen=package
-// +k8s:openapi-gen=true
-// +k8s:prerelease-lifecycle-gen=true
-// +groupName=networking.k8s.io
-
-package v1alpha1
diff --git a/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go b/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go
deleted file mode 100644
index 0d4203483..000000000
--- a/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go
+++ /dev/null
@@ -1,1929 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: k8s.io/api/networking/v1alpha1/generated.proto
-
-package v1alpha1
-
-import (
- fmt "fmt"
-
- io "io"
-
- proto "github.com/gogo/protobuf/proto"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
- math "math"
- math_bits "math/bits"
- reflect "reflect"
- strings "strings"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-func (m *IPAddress) Reset() { *m = IPAddress{} }
-func (*IPAddress) ProtoMessage() {}
-func (*IPAddress) Descriptor() ([]byte, []int) {
- return fileDescriptor_c1cb39e7b48ce50d, []int{0}
-}
-func (m *IPAddress) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IPAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IPAddress) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IPAddress.Merge(m, src)
-}
-func (m *IPAddress) XXX_Size() int {
- return m.Size()
-}
-func (m *IPAddress) XXX_DiscardUnknown() {
- xxx_messageInfo_IPAddress.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IPAddress proto.InternalMessageInfo
-
-func (m *IPAddressList) Reset() { *m = IPAddressList{} }
-func (*IPAddressList) ProtoMessage() {}
-func (*IPAddressList) Descriptor() ([]byte, []int) {
- return fileDescriptor_c1cb39e7b48ce50d, []int{1}
-}
-func (m *IPAddressList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IPAddressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IPAddressList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IPAddressList.Merge(m, src)
-}
-func (m *IPAddressList) XXX_Size() int {
- return m.Size()
-}
-func (m *IPAddressList) XXX_DiscardUnknown() {
- xxx_messageInfo_IPAddressList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IPAddressList proto.InternalMessageInfo
-
-func (m *IPAddressSpec) Reset() { *m = IPAddressSpec{} }
-func (*IPAddressSpec) ProtoMessage() {}
-func (*IPAddressSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_c1cb39e7b48ce50d, []int{2}
-}
-func (m *IPAddressSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IPAddressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IPAddressSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IPAddressSpec.Merge(m, src)
-}
-func (m *IPAddressSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *IPAddressSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_IPAddressSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IPAddressSpec proto.InternalMessageInfo
-
-func (m *ParentReference) Reset() { *m = ParentReference{} }
-func (*ParentReference) ProtoMessage() {}
-func (*ParentReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_c1cb39e7b48ce50d, []int{3}
-}
-func (m *ParentReference) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ParentReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ParentReference) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ParentReference.Merge(m, src)
-}
-func (m *ParentReference) XXX_Size() int {
- return m.Size()
-}
-func (m *ParentReference) XXX_DiscardUnknown() {
- xxx_messageInfo_ParentReference.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ParentReference proto.InternalMessageInfo
-
-func (m *ServiceCIDR) Reset() { *m = ServiceCIDR{} }
-func (*ServiceCIDR) ProtoMessage() {}
-func (*ServiceCIDR) Descriptor() ([]byte, []int) {
- return fileDescriptor_c1cb39e7b48ce50d, []int{4}
-}
-func (m *ServiceCIDR) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ServiceCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ServiceCIDR) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceCIDR.Merge(m, src)
-}
-func (m *ServiceCIDR) XXX_Size() int {
- return m.Size()
-}
-func (m *ServiceCIDR) XXX_DiscardUnknown() {
- xxx_messageInfo_ServiceCIDR.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServiceCIDR proto.InternalMessageInfo
-
-func (m *ServiceCIDRList) Reset() { *m = ServiceCIDRList{} }
-func (*ServiceCIDRList) ProtoMessage() {}
-func (*ServiceCIDRList) Descriptor() ([]byte, []int) {
- return fileDescriptor_c1cb39e7b48ce50d, []int{5}
-}
-func (m *ServiceCIDRList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ServiceCIDRList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ServiceCIDRList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceCIDRList.Merge(m, src)
-}
-func (m *ServiceCIDRList) XXX_Size() int {
- return m.Size()
-}
-func (m *ServiceCIDRList) XXX_DiscardUnknown() {
- xxx_messageInfo_ServiceCIDRList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServiceCIDRList proto.InternalMessageInfo
-
-func (m *ServiceCIDRSpec) Reset() { *m = ServiceCIDRSpec{} }
-func (*ServiceCIDRSpec) ProtoMessage() {}
-func (*ServiceCIDRSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_c1cb39e7b48ce50d, []int{6}
-}
-func (m *ServiceCIDRSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ServiceCIDRSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ServiceCIDRSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceCIDRSpec.Merge(m, src)
-}
-func (m *ServiceCIDRSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *ServiceCIDRSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_ServiceCIDRSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServiceCIDRSpec proto.InternalMessageInfo
-
-func (m *ServiceCIDRStatus) Reset() { *m = ServiceCIDRStatus{} }
-func (*ServiceCIDRStatus) ProtoMessage() {}
-func (*ServiceCIDRStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_c1cb39e7b48ce50d, []int{7}
-}
-func (m *ServiceCIDRStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ServiceCIDRStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ServiceCIDRStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceCIDRStatus.Merge(m, src)
-}
-func (m *ServiceCIDRStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ServiceCIDRStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ServiceCIDRStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServiceCIDRStatus proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*IPAddress)(nil), "k8s.io.api.networking.v1alpha1.IPAddress")
- proto.RegisterType((*IPAddressList)(nil), "k8s.io.api.networking.v1alpha1.IPAddressList")
- proto.RegisterType((*IPAddressSpec)(nil), "k8s.io.api.networking.v1alpha1.IPAddressSpec")
- proto.RegisterType((*ParentReference)(nil), "k8s.io.api.networking.v1alpha1.ParentReference")
- proto.RegisterType((*ServiceCIDR)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDR")
- proto.RegisterType((*ServiceCIDRList)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDRList")
- proto.RegisterType((*ServiceCIDRSpec)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDRSpec")
- proto.RegisterType((*ServiceCIDRStatus)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDRStatus")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/networking/v1alpha1/generated.proto", fileDescriptor_c1cb39e7b48ce50d)
-}
-
-var fileDescriptor_c1cb39e7b48ce50d = []byte{
- // 634 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcd, 0x6e, 0xd3, 0x4a,
- 0x18, 0x8d, 0xdb, 0xa4, 0xaa, 0x27, 0xb7, 0xb7, 0xb7, 0x5e, 0x45, 0x5d, 0x38, 0x91, 0xef, 0xa6,
- 0x08, 0x3a, 0x26, 0x11, 0x42, 0x6c, 0x71, 0x2b, 0xa1, 0x4a, 0xd0, 0x96, 0xe9, 0x0a, 0xd4, 0x05,
- 0xd3, 0xc9, 0x57, 0x67, 0x08, 0xfe, 0xd1, 0xcc, 0x24, 0xc0, 0x8e, 0x47, 0xe0, 0x05, 0x78, 0x0e,
- 0x56, 0x20, 0xb1, 0xeb, 0xb2, 0xcb, 0xae, 0x2a, 0x6a, 0x5e, 0x04, 0xcd, 0xd8, 0xb1, 0x93, 0x46,
- 0xfd, 0xdb, 0x74, 0xe7, 0xef, 0xcc, 0x39, 0x67, 0xbe, 0xf3, 0xcd, 0x8c, 0x8c, 0xf0, 0xf0, 0x99,
- 0xc4, 0x3c, 0xf1, 0x69, 0xca, 0xfd, 0x18, 0xd4, 0xc7, 0x44, 0x0c, 0x79, 0x1c, 0xfa, 0xe3, 0x2e,
- 0xfd, 0x90, 0x0e, 0x68, 0xd7, 0x0f, 0x21, 0x06, 0x41, 0x15, 0xf4, 0x71, 0x2a, 0x12, 0x95, 0x38,
- 0x6e, 0xce, 0xc7, 0x34, 0xe5, 0xb8, 0xe2, 0xe3, 0x09, 0x7f, 0x7d, 0x33, 0xe4, 0x6a, 0x30, 0x3a,
- 0xc2, 0x2c, 0x89, 0xfc, 0x30, 0x09, 0x13, 0xdf, 0xc8, 0x8e, 0x46, 0xc7, 0xa6, 0x32, 0x85, 0xf9,
- 0xca, 0xed, 0xd6, 0x9f, 0x54, 0xdb, 0x47, 0x94, 0x0d, 0x78, 0x0c, 0xe2, 0xb3, 0x9f, 0x0e, 0x43,
- 0x0d, 0x48, 0x3f, 0x02, 0x45, 0xfd, 0xf1, 0x5c, 0x13, 0xeb, 0xfe, 0x55, 0x2a, 0x31, 0x8a, 0x15,
- 0x8f, 0x60, 0x4e, 0xf0, 0xf4, 0x26, 0x81, 0x64, 0x03, 0x88, 0xe8, 0x65, 0x9d, 0xf7, 0xd3, 0x42,
- 0xf6, 0xce, 0xfe, 0xf3, 0x7e, 0x5f, 0x80, 0x94, 0xce, 0x3b, 0xb4, 0xac, 0x3b, 0xea, 0x53, 0x45,
- 0x5b, 0x56, 0xc7, 0xda, 0x68, 0xf6, 0x1e, 0xe3, 0x6a, 0x1c, 0xa5, 0x31, 0x4e, 0x87, 0xa1, 0x06,
- 0x24, 0xd6, 0x6c, 0x3c, 0xee, 0xe2, 0xbd, 0xa3, 0xf7, 0xc0, 0xd4, 0x2b, 0x50, 0x34, 0x70, 0x4e,
- 0xce, 0xdb, 0xb5, 0xec, 0xbc, 0x8d, 0x2a, 0x8c, 0x94, 0xae, 0xce, 0x1e, 0xaa, 0xcb, 0x14, 0x58,
- 0x6b, 0xc1, 0xb8, 0x6f, 0xe2, 0xeb, 0x87, 0x8d, 0xcb, 0xd6, 0x0e, 0x52, 0x60, 0xc1, 0x3f, 0x85,
- 0x75, 0x5d, 0x57, 0xc4, 0x18, 0x79, 0x3f, 0x2c, 0xb4, 0x52, 0xb2, 0x5e, 0x72, 0xa9, 0x9c, 0xc3,
- 0xb9, 0x10, 0xf8, 0x76, 0x21, 0xb4, 0xda, 0x44, 0xf8, 0xaf, 0xd8, 0x67, 0x79, 0x82, 0x4c, 0x05,
- 0xd8, 0x45, 0x0d, 0xae, 0x20, 0x92, 0xad, 0x85, 0xce, 0xe2, 0x46, 0xb3, 0xf7, 0xe0, 0xd6, 0x09,
- 0x82, 0x95, 0xc2, 0xb5, 0xb1, 0xa3, 0xf5, 0x24, 0xb7, 0xf1, 0xa2, 0xa9, 0xf6, 0x75, 0x2c, 0xe7,
- 0x10, 0xd9, 0x29, 0x15, 0x10, 0x2b, 0x02, 0xc7, 0x45, 0xff, 0xfe, 0x4d, 0x9b, 0xec, 0x4f, 0x04,
- 0x20, 0x20, 0x66, 0x10, 0xac, 0x64, 0xe7, 0x6d, 0xbb, 0x04, 0x49, 0x65, 0xe8, 0x7d, 0xb7, 0xd0,
- 0xea, 0x25, 0xb6, 0xf3, 0x3f, 0x6a, 0x84, 0x22, 0x19, 0xa5, 0x66, 0x37, 0xbb, 0xea, 0xf3, 0x85,
- 0x06, 0x49, 0xbe, 0xe6, 0x3c, 0x42, 0xcb, 0x02, 0x64, 0x32, 0x12, 0x0c, 0xcc, 0xe1, 0xd9, 0xd5,
- 0x94, 0x48, 0x81, 0x93, 0x92, 0xe1, 0xf8, 0xc8, 0x8e, 0x69, 0x04, 0x32, 0xa5, 0x0c, 0x5a, 0x8b,
- 0x86, 0xbe, 0x56, 0xd0, 0xed, 0xdd, 0xc9, 0x02, 0xa9, 0x38, 0x4e, 0x07, 0xd5, 0x75, 0xd1, 0xaa,
- 0x1b, 0x6e, 0x79, 0xd0, 0x9a, 0x4b, 0xcc, 0x8a, 0xf7, 0x6d, 0x01, 0x35, 0x0f, 0x40, 0x8c, 0x39,
- 0x83, 0xad, 0x9d, 0x6d, 0x72, 0x0f, 0x77, 0xf5, 0xf5, 0xcc, 0x5d, 0xbd, 0xf1, 0x10, 0xa6, 0x9a,
- 0xbb, 0xea, 0xb6, 0x3a, 0x6f, 0xd0, 0x92, 0x54, 0x54, 0x8d, 0xa4, 0x19, 0x4a, 0xb3, 0xd7, 0xbd,
- 0x8b, 0xa9, 0x11, 0x06, 0xff, 0x16, 0xb6, 0x4b, 0x79, 0x4d, 0x0a, 0x43, 0xef, 0x97, 0x85, 0x56,
- 0xa7, 0xd8, 0xf7, 0xf0, 0x14, 0xf6, 0x67, 0x9f, 0xc2, 0xc3, 0x3b, 0x64, 0xb9, 0xe2, 0x31, 0xf4,
- 0x66, 0x22, 0x98, 0xe7, 0xd0, 0x46, 0x0d, 0xc6, 0xfb, 0x42, 0xb6, 0xac, 0xce, 0xe2, 0x86, 0x1d,
- 0xd8, 0x5a, 0xa3, 0x17, 0x25, 0xc9, 0x71, 0xef, 0x13, 0x5a, 0x9b, 0x1b, 0x92, 0xc3, 0x10, 0x62,
- 0x49, 0xdc, 0xe7, 0x8a, 0x27, 0x71, 0x2e, 0x9d, 0x3d, 0xc0, 0x6b, 0xa2, 0x6f, 0x4d, 0x74, 0xd5,
- 0xed, 0x28, 0x21, 0x49, 0xa6, 0x6c, 0x83, 0xed, 0x93, 0x0b, 0xb7, 0x76, 0x7a, 0xe1, 0xd6, 0xce,
- 0x2e, 0xdc, 0xda, 0x97, 0xcc, 0xb5, 0x4e, 0x32, 0xd7, 0x3a, 0xcd, 0x5c, 0xeb, 0x2c, 0x73, 0xad,
- 0xdf, 0x99, 0x6b, 0x7d, 0xfd, 0xe3, 0xd6, 0xde, 0xba, 0xd7, 0xff, 0x7f, 0xfe, 0x06, 0x00, 0x00,
- 0xff, 0xff, 0xb1, 0xd0, 0x33, 0x02, 0xa0, 0x06, 0x00, 0x00,
-}
-
-func (m *IPAddress) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *IPAddress) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *IPAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- {
- size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *IPAddressList) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *IPAddressList) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *IPAddressList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Items) > 0 {
- for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- {
- size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *IPAddressSpec) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *IPAddressSpec) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *IPAddressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.ParentRef != nil {
- {
- size, err := m.ParentRef.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ParentReference) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ParentReference) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ParentReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0x22
- i -= len(m.Namespace)
- copy(dAtA[i:], m.Namespace)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
- i--
- dAtA[i] = 0x1a
- i -= len(m.Resource)
- copy(dAtA[i:], m.Resource)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource)))
- i--
- dAtA[i] = 0x12
- i -= len(m.Group)
- copy(dAtA[i:], m.Group)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group)))
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *ServiceCIDR) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ServiceCIDR) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ServiceCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- {
- size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- {
- size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *ServiceCIDRList) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ServiceCIDRList) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ServiceCIDRList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Items) > 0 {
- for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- {
- size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *ServiceCIDRSpec) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ServiceCIDRSpec) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ServiceCIDRSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.CIDRs) > 0 {
- for iNdEx := len(m.CIDRs) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.CIDRs[iNdEx])
- copy(dAtA[i:], m.CIDRs[iNdEx])
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDRs[iNdEx])))
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ServiceCIDRStatus) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ServiceCIDRStatus) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ServiceCIDRStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Conditions) > 0 {
- for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
- offset -= sovGenerated(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *IPAddress) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.ObjectMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- l = m.Spec.Size()
- n += 1 + l + sovGenerated(uint64(l))
- return n
-}
-
-func (m *IPAddressList) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.ListMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.Items) > 0 {
- for _, e := range m.Items {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- return n
-}
-
-func (m *IPAddressSpec) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ParentRef != nil {
- l = m.ParentRef.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- return n
-}
-
-func (m *ParentReference) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Group)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Resource)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Namespace)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Name)
- n += 1 + l + sovGenerated(uint64(l))
- return n
-}
-
-func (m *ServiceCIDR) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.ObjectMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- l = m.Spec.Size()
- n += 1 + l + sovGenerated(uint64(l))
- l = m.Status.Size()
- n += 1 + l + sovGenerated(uint64(l))
- return n
-}
-
-func (m *ServiceCIDRList) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.ListMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.Items) > 0 {
- for _, e := range m.Items {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- return n
-}
-
-func (m *ServiceCIDRSpec) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.CIDRs) > 0 {
- for _, s := range m.CIDRs {
- l = len(s)
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- return n
-}
-
-func (m *ServiceCIDRStatus) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Conditions) > 0 {
- for _, e := range m.Conditions {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- return n
-}
-
-func sovGenerated(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozGenerated(x uint64) (n int) {
- return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (this *IPAddress) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&IPAddress{`,
- `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
- `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IPAddressSpec", "IPAddressSpec", 1), `&`, ``, 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *IPAddressList) String() string {
- if this == nil {
- return "nil"
- }
- repeatedStringForItems := "[]IPAddress{"
- for _, f := range this.Items {
- repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "IPAddress", "IPAddress", 1), `&`, ``, 1) + ","
- }
- repeatedStringForItems += "}"
- s := strings.Join([]string{`&IPAddressList{`,
- `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
- `Items:` + repeatedStringForItems + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *IPAddressSpec) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&IPAddressSpec{`,
- `ParentRef:` + strings.Replace(this.ParentRef.String(), "ParentReference", "ParentReference", 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *ParentReference) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&ParentReference{`,
- `Group:` + fmt.Sprintf("%v", this.Group) + `,`,
- `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`,
- `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
- `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *ServiceCIDR) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&ServiceCIDR{`,
- `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
- `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceCIDRSpec", "ServiceCIDRSpec", 1), `&`, ``, 1) + `,`,
- `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ServiceCIDRStatus", "ServiceCIDRStatus", 1), `&`, ``, 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *ServiceCIDRList) String() string {
- if this == nil {
- return "nil"
- }
- repeatedStringForItems := "[]ServiceCIDR{"
- for _, f := range this.Items {
- repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ServiceCIDR", "ServiceCIDR", 1), `&`, ``, 1) + ","
- }
- repeatedStringForItems += "}"
- s := strings.Join([]string{`&ServiceCIDRList{`,
- `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
- `Items:` + repeatedStringForItems + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *ServiceCIDRSpec) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&ServiceCIDRSpec{`,
- `CIDRs:` + fmt.Sprintf("%v", this.CIDRs) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *ServiceCIDRStatus) String() string {
- if this == nil {
- return "nil"
- }
- repeatedStringForConditions := "[]Condition{"
- for _, f := range this.Conditions {
- repeatedStringForConditions += fmt.Sprintf("%v", f) + ","
- }
- repeatedStringForConditions += "}"
- s := strings.Join([]string{`&ServiceCIDRStatus{`,
- `Conditions:` + repeatedStringForConditions + `,`,
- `}`,
- }, "")
- return s
-}
-func valueToStringGenerated(v interface{}) string {
- rv := reflect.ValueOf(v)
- if rv.IsNil() {
- return "nil"
- }
- pv := reflect.Indirect(rv).Interface()
- return fmt.Sprintf("*%v", pv)
-}
-func (m *IPAddress) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: IPAddress: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: IPAddress: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *IPAddressList) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: IPAddressList: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: IPAddressList: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Items = append(m.Items, IPAddress{})
- if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *IPAddressSpec) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: IPAddressSpec: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: IPAddressSpec: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ParentRef", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.ParentRef == nil {
- m.ParentRef = &ParentReference{}
- }
- if err := m.ParentRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ParentReference) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ParentReference: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ParentReference: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Group = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Resource = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Namespace = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ServiceCIDR) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ServiceCIDR: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ServiceCIDR: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ServiceCIDRList) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ServiceCIDRList: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ServiceCIDRList: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Items = append(m.Items, ServiceCIDR{})
- if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ServiceCIDRSpec) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ServiceCIDRSpec: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ServiceCIDRSpec: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field CIDRs", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.CIDRs = append(m.CIDRs, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ServiceCIDRStatus) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ServiceCIDRStatus: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ServiceCIDRStatus: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Conditions = append(m.Conditions, v1.Condition{})
- if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipGenerated(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthGenerated
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupGenerated
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthGenerated
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/vendor/k8s.io/api/networking/v1alpha1/generated.proto b/vendor/k8s.io/api/networking/v1alpha1/generated.proto
deleted file mode 100644
index 80ec6af73..000000000
--- a/vendor/k8s.io/api/networking/v1alpha1/generated.proto
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-
-// This file was autogenerated by go-to-protobuf. Do not edit it manually!
-
-syntax = "proto2";
-
-package k8s.io.api.networking.v1alpha1;
-
-import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
-import "k8s.io/apimachinery/pkg/runtime/generated.proto";
-import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
-
-// Package-wide variables from generator "generated".
-option go_package = "k8s.io/api/networking/v1alpha1";
-
-// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs
-// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses.
-// An IP address can be represented in different formats, to guarantee the uniqueness of the IP,
-// the name of the object is the IP address in canonical format, four decimal digits separated
-// by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6.
-// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1
-// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1
-message IPAddress {
- // Standard object's metadata.
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
- // +optional
- optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
-
- // spec is the desired state of the IPAddress.
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
- // +optional
- optional IPAddressSpec spec = 2;
-}
-
-// IPAddressList contains a list of IPAddress.
-message IPAddressList {
- // Standard object's metadata.
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
- // +optional
- optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
-
- // items is the list of IPAddresses.
- repeated IPAddress items = 2;
-}
-
-// IPAddressSpec describe the attributes in an IP Address.
-message IPAddressSpec {
- // ParentRef references the resource that an IPAddress is attached to.
- // An IPAddress must reference a parent object.
- // +required
- optional ParentReference parentRef = 1;
-}
-
-// ParentReference describes a reference to a parent object.
-message ParentReference {
- // Group is the group of the object being referenced.
- // +optional
- optional string group = 1;
-
- // Resource is the resource of the object being referenced.
- // +required
- optional string resource = 2;
-
- // Namespace is the namespace of the object being referenced.
- // +optional
- optional string namespace = 3;
-
- // Name is the name of the object being referenced.
- // +required
- optional string name = 4;
-}
-
-// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64).
-// This range is used to allocate ClusterIPs to Service objects.
-message ServiceCIDR {
- // Standard object's metadata.
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
- // +optional
- optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
-
- // spec is the desired state of the ServiceCIDR.
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
- // +optional
- optional ServiceCIDRSpec spec = 2;
-
- // status represents the current state of the ServiceCIDR.
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
- // +optional
- optional ServiceCIDRStatus status = 3;
-}
-
-// ServiceCIDRList contains a list of ServiceCIDR objects.
-message ServiceCIDRList {
- // Standard object's metadata.
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
- // +optional
- optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
-
- // items is the list of ServiceCIDRs.
- repeated ServiceCIDR items = 2;
-}
-
-// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.
-message ServiceCIDRSpec {
- // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64")
- // from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family.
- // The network address of each CIDR, the address that identifies the subnet of a host, is reserved
- // and will not be allocated. The broadcast address for IPv4 CIDRs is also reserved and will not be
- // allocated.
- // This field is immutable.
- // +optional
- // +listType=atomic
- repeated string cidrs = 1;
-}
-
-// ServiceCIDRStatus describes the current state of the ServiceCIDR.
-message ServiceCIDRStatus {
- // conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR.
- // Current service state
- // +optional
- // +patchMergeKey=type
- // +patchStrategy=merge
- // +listType=map
- // +listMapKey=type
- repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1;
-}
-
diff --git a/vendor/k8s.io/api/networking/v1alpha1/register.go b/vendor/k8s.io/api/networking/v1alpha1/register.go
deleted file mode 100644
index c8f5856b5..000000000
--- a/vendor/k8s.io/api/networking/v1alpha1/register.go
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
-Copyright 2022 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-// GroupName is the group name used in this package.
-const GroupName = "networking.k8s.io"
-
-// SchemeGroupVersion is group version used to register objects in this package.
-var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
-
-// Kind takes an unqualified kind and returns a Group qualified GroupKind.
-func Kind(kind string) schema.GroupKind {
- return SchemeGroupVersion.WithKind(kind).GroupKind()
-}
-
-// Resource takes an unqualified resource and returns a Group qualified GroupResource.
-func Resource(resource string) schema.GroupResource {
- return SchemeGroupVersion.WithResource(resource).GroupResource()
-}
-
-var (
- // SchemeBuilder holds functions that add things to a scheme.
- // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
- // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
- SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
- localSchemeBuilder = &SchemeBuilder
-
- // AddToScheme adds the types of this group into the given scheme.
- AddToScheme = localSchemeBuilder.AddToScheme
-)
-
-// Adds the list of known types to the given scheme.
-func addKnownTypes(scheme *runtime.Scheme) error {
- scheme.AddKnownTypes(SchemeGroupVersion,
- &IPAddress{},
- &IPAddressList{},
- &ServiceCIDR{},
- &ServiceCIDRList{},
- )
- metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
- return nil
-}
diff --git a/vendor/k8s.io/api/networking/v1alpha1/types.go b/vendor/k8s.io/api/networking/v1alpha1/types.go
deleted file mode 100644
index 0e454f026..000000000
--- a/vendor/k8s.io/api/networking/v1alpha1/types.go
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
-Copyright 2022 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +k8s:prerelease-lifecycle-gen:introduced=1.27
-
-// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs
-// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses.
-// An IP address can be represented in different formats, to guarantee the uniqueness of the IP,
-// the name of the object is the IP address in canonical format, four decimal digits separated
-// by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6.
-// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1
-// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1
-type IPAddress struct {
- metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
- // +optional
- metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
- // spec is the desired state of the IPAddress.
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
- // +optional
- Spec IPAddressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
-}
-
-// IPAddressSpec describe the attributes in an IP Address.
-type IPAddressSpec struct {
- // ParentRef references the resource that an IPAddress is attached to.
- // An IPAddress must reference a parent object.
- // +required
- ParentRef *ParentReference `json:"parentRef,omitempty" protobuf:"bytes,1,opt,name=parentRef"`
-}
-
-// ParentReference describes a reference to a parent object.
-type ParentReference struct {
- // Group is the group of the object being referenced.
- // +optional
- Group string `json:"group,omitempty" protobuf:"bytes,1,opt,name=group"`
- // Resource is the resource of the object being referenced.
- // +required
- Resource string `json:"resource,omitempty" protobuf:"bytes,2,opt,name=resource"`
- // Namespace is the namespace of the object being referenced.
- // +optional
- Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"`
- // Name is the name of the object being referenced.
- // +required
- Name string `json:"name,omitempty" protobuf:"bytes,4,opt,name=name"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +k8s:prerelease-lifecycle-gen:introduced=1.27
-
-// IPAddressList contains a list of IPAddress.
-type IPAddressList struct {
- metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
- // +optional
- metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
- // items is the list of IPAddresses.
- Items []IPAddress `json:"items" protobuf:"bytes,2,rep,name=items"`
-}
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +k8s:prerelease-lifecycle-gen:introduced=1.27
-
-// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64).
-// This range is used to allocate ClusterIPs to Service objects.
-type ServiceCIDR struct {
- metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
- // +optional
- metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
- // spec is the desired state of the ServiceCIDR.
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
- // +optional
- Spec ServiceCIDRSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
- // status represents the current state of the ServiceCIDR.
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
- // +optional
- Status ServiceCIDRStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
-}
-
-// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.
-type ServiceCIDRSpec struct {
- // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64")
- // from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family.
- // The network address of each CIDR, the address that identifies the subnet of a host, is reserved
- // and will not be allocated. The broadcast address for IPv4 CIDRs is also reserved and will not be
- // allocated.
- // This field is immutable.
- // +optional
- // +listType=atomic
- CIDRs []string `json:"cidrs,omitempty" protobuf:"bytes,1,opt,name=cidrs"`
-}
-
-const (
- // ServiceCIDRConditionReady represents status of a ServiceCIDR that is ready to be used by the
- // apiserver to allocate ClusterIPs for Services.
- ServiceCIDRConditionReady = "Ready"
- // ServiceCIDRReasonTerminating represents a reason where a ServiceCIDR is not ready because it is
- // being deleted.
- ServiceCIDRReasonTerminating = "Terminating"
-)
-
-// ServiceCIDRStatus describes the current state of the ServiceCIDR.
-type ServiceCIDRStatus struct {
- // conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR.
- // Current service state
- // +optional
- // +patchMergeKey=type
- // +patchStrategy=merge
- // +listType=map
- // +listMapKey=type
- Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +k8s:prerelease-lifecycle-gen:introduced=1.27
-
-// ServiceCIDRList contains a list of ServiceCIDR objects.
-type ServiceCIDRList struct {
- metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
- // +optional
- metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
- // items is the list of ServiceCIDRs.
- Items []ServiceCIDR `json:"items" protobuf:"bytes,2,rep,name=items"`
-}
diff --git a/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go
deleted file mode 100644
index 4c8eb57a7..000000000
--- a/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-// This file contains a collection of methods that can be used from go-restful to
-// generate Swagger API documentation for its models. Please read this PR for more
-// information on the implementation: https://github.com/emicklei/go-restful/pull/215
-//
-// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
-// they are on one line! For multiple line or blocks that you want to ignore use ---.
-// Any context after a --- is ignored.
-//
-// Those methods can be generated by using hack/update-codegen.sh
-
-// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
-var map_IPAddress = map[string]string{
- "": "IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. An IP address can be represented in different formats, to guarantee the uniqueness of the IP, the name of the object is the IP address in canonical format, four decimal digits separated by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 Invalid: 10.01.2.3 or 2001:db8:0:0:0::1",
- "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
- "spec": "spec is the desired state of the IPAddress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
-}
-
-func (IPAddress) SwaggerDoc() map[string]string {
- return map_IPAddress
-}
-
-var map_IPAddressList = map[string]string{
- "": "IPAddressList contains a list of IPAddress.",
- "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
- "items": "items is the list of IPAddresses.",
-}
-
-func (IPAddressList) SwaggerDoc() map[string]string {
- return map_IPAddressList
-}
-
-var map_IPAddressSpec = map[string]string{
- "": "IPAddressSpec describe the attributes in an IP Address.",
- "parentRef": "ParentRef references the resource that an IPAddress is attached to. An IPAddress must reference a parent object.",
-}
-
-func (IPAddressSpec) SwaggerDoc() map[string]string {
- return map_IPAddressSpec
-}
-
-var map_ParentReference = map[string]string{
- "": "ParentReference describes a reference to a parent object.",
- "group": "Group is the group of the object being referenced.",
- "resource": "Resource is the resource of the object being referenced.",
- "namespace": "Namespace is the namespace of the object being referenced.",
- "name": "Name is the name of the object being referenced.",
-}
-
-func (ParentReference) SwaggerDoc() map[string]string {
- return map_ParentReference
-}
-
-var map_ServiceCIDR = map[string]string{
- "": "ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). This range is used to allocate ClusterIPs to Service objects.",
- "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
- "spec": "spec is the desired state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
- "status": "status represents the current state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
-}
-
-func (ServiceCIDR) SwaggerDoc() map[string]string {
- return map_ServiceCIDR
-}
-
-var map_ServiceCIDRList = map[string]string{
- "": "ServiceCIDRList contains a list of ServiceCIDR objects.",
- "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
- "items": "items is the list of ServiceCIDRs.",
-}
-
-func (ServiceCIDRList) SwaggerDoc() map[string]string {
- return map_ServiceCIDRList
-}
-
-var map_ServiceCIDRSpec = map[string]string{
- "": "ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.",
- "cidrs": "CIDRs defines the IP blocks in CIDR notation (e.g. \"192.168.0.0/24\" or \"2001:db8::/64\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. The network address of each CIDR, the address that identifies the subnet of a host, is reserved and will not be allocated. The broadcast address for IPv4 CIDRs is also reserved and will not be allocated. This field is immutable.",
-}
-
-func (ServiceCIDRSpec) SwaggerDoc() map[string]string {
- return map_ServiceCIDRSpec
-}
-
-var map_ServiceCIDRStatus = map[string]string{
- "": "ServiceCIDRStatus describes the current state of the ServiceCIDR.",
- "conditions": "conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. Current service state",
-}
-
-func (ServiceCIDRStatus) SwaggerDoc() map[string]string {
- return map_ServiceCIDRStatus
-}
-
-// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go b/vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go
deleted file mode 100644
index 5f9c23f70..000000000
--- a/vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
-Copyright 2023 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-const (
-
- // TODO: Use IPFamily as field with a field selector,And the value is set based on
- // the name at create time and immutable.
- // LabelIPAddressFamily is used to indicate the IP family of a Kubernetes IPAddress.
- // This label simplify dual-stack client operations allowing to obtain the list of
- // IP addresses filtered by family.
- LabelIPAddressFamily = "ipaddress.kubernetes.io/ip-family"
- // LabelManagedBy is used to indicate the controller or entity that manages
- // an IPAddress. This label aims to enable different IPAddress
- // objects to be managed by different controllers or entities within the
- // same cluster. It is highly recommended to configure this label for all
- // IPAddress objects.
- LabelManagedBy = "ipaddress.kubernetes.io/managed-by"
-)
diff --git a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go
deleted file mode 100644
index 5c8f697ba..000000000
--- a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,229 +0,0 @@
-//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by deepcopy-gen. DO NOT EDIT.
-
-package v1alpha1
-
-import (
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- runtime "k8s.io/apimachinery/pkg/runtime"
-)
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *IPAddress) DeepCopyInto(out *IPAddress) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddress.
-func (in *IPAddress) DeepCopy() *IPAddress {
- if in == nil {
- return nil
- }
- out := new(IPAddress)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *IPAddress) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *IPAddressList) DeepCopyInto(out *IPAddressList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]IPAddress, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressList.
-func (in *IPAddressList) DeepCopy() *IPAddressList {
- if in == nil {
- return nil
- }
- out := new(IPAddressList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *IPAddressList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *IPAddressSpec) DeepCopyInto(out *IPAddressSpec) {
- *out = *in
- if in.ParentRef != nil {
- in, out := &in.ParentRef, &out.ParentRef
- *out = new(ParentReference)
- **out = **in
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressSpec.
-func (in *IPAddressSpec) DeepCopy() *IPAddressSpec {
- if in == nil {
- return nil
- }
- out := new(IPAddressSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ParentReference) DeepCopyInto(out *ParentReference) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParentReference.
-func (in *ParentReference) DeepCopy() *ParentReference {
- if in == nil {
- return nil
- }
- out := new(ParentReference)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ServiceCIDR) DeepCopyInto(out *ServiceCIDR) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- in.Status.DeepCopyInto(&out.Status)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDR.
-func (in *ServiceCIDR) DeepCopy() *ServiceCIDR {
- if in == nil {
- return nil
- }
- out := new(ServiceCIDR)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ServiceCIDR) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ServiceCIDRList) DeepCopyInto(out *ServiceCIDRList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]ServiceCIDR, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRList.
-func (in *ServiceCIDRList) DeepCopy() *ServiceCIDRList {
- if in == nil {
- return nil
- }
- out := new(ServiceCIDRList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ServiceCIDRList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ServiceCIDRSpec) DeepCopyInto(out *ServiceCIDRSpec) {
- *out = *in
- if in.CIDRs != nil {
- in, out := &in.CIDRs, &out.CIDRs
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRSpec.
-func (in *ServiceCIDRSpec) DeepCopy() *ServiceCIDRSpec {
- if in == nil {
- return nil
- }
- out := new(ServiceCIDRSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ServiceCIDRStatus) DeepCopyInto(out *ServiceCIDRStatus) {
- *out = *in
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make([]v1.Condition, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRStatus.
-func (in *ServiceCIDRStatus) DeepCopy() *ServiceCIDRStatus {
- if in == nil {
- return nil
- }
- out := new(ServiceCIDRStatus)
- in.DeepCopyInto(out)
- return out
-}
diff --git a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go
deleted file mode 100644
index 714e7b625..000000000
--- a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go
+++ /dev/null
@@ -1,94 +0,0 @@
-//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
-
-package v1alpha1
-
-// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
-func (in *IPAddress) APILifecycleIntroduced() (major, minor int) {
- return 1, 27
-}
-
-// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
-func (in *IPAddress) APILifecycleDeprecated() (major, minor int) {
- return 1, 30
-}
-
-// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
-func (in *IPAddress) APILifecycleRemoved() (major, minor int) {
- return 1, 33
-}
-
-// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
-func (in *IPAddressList) APILifecycleIntroduced() (major, minor int) {
- return 1, 27
-}
-
-// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
-func (in *IPAddressList) APILifecycleDeprecated() (major, minor int) {
- return 1, 30
-}
-
-// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
-func (in *IPAddressList) APILifecycleRemoved() (major, minor int) {
- return 1, 33
-}
-
-// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
-func (in *ServiceCIDR) APILifecycleIntroduced() (major, minor int) {
- return 1, 27
-}
-
-// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
-func (in *ServiceCIDR) APILifecycleDeprecated() (major, minor int) {
- return 1, 30
-}
-
-// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
-func (in *ServiceCIDR) APILifecycleRemoved() (major, minor int) {
- return 1, 33
-}
-
-// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
-func (in *ServiceCIDRList) APILifecycleIntroduced() (major, minor int) {
- return 1, 27
-}
-
-// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
-func (in *ServiceCIDRList) APILifecycleDeprecated() (major, minor int) {
- return 1, 30
-}
-
-// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
-func (in *ServiceCIDRList) APILifecycleRemoved() (major, minor int) {
- return 1, 33
-}
diff --git a/vendor/k8s.io/api/resource/v1alpha3/devicetaint.go b/vendor/k8s.io/api/resource/v1alpha3/devicetaint.go
new file mode 100644
index 000000000..dd3c65d26
--- /dev/null
+++ b/vendor/k8s.io/api/resource/v1alpha3/devicetaint.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha3
+
+import "fmt"
+
+var _ fmt.Stringer = DeviceTaint{}
+
+// String converts to a string in the format '<key>=<value>:<effect>', '<key>=<value>:', '<key>:<effect>', or '<key>'.
+func (t DeviceTaint) String() string {
+ if len(t.Effect) == 0 {
+ if len(t.Value) == 0 {
+ return fmt.Sprintf("%v", t.Key)
+ }
+ return fmt.Sprintf("%v=%v:", t.Key, t.Value)
+ }
+ if len(t.Value) == 0 {
+ return fmt.Sprintf("%v:%v", t.Key, t.Effect)
+ }
+ return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect)
+}
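
Editor's note (not part of the patch): the new DeviceTaint.String method above renders a taint the same way node taints are printed, dropping whichever of value/effect is empty. A minimal, illustrative Go sketch of the resulting output is below; the import alias, key, and effect string are assumptions for the example, not values taken from this patch.

	package main

	import (
		"fmt"

		resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
	)

	func main() {
		// Fully populated taint: prints "example.com/unhealthy=true:NoSchedule".
		t := resourcev1alpha3.DeviceTaint{
			Key:    "example.com/unhealthy",
			Value:  "true",
			Effect: "NoSchedule", // untyped string constant; assumed effect name for illustration
		}
		fmt.Println(t.String())

		// Empty effect keeps the trailing colon: "example.com/unhealthy=true:".
		t.Effect = ""
		fmt.Println(t.String())

		// Key only: "example.com/unhealthy".
		t.Value = ""
		fmt.Println(t.String())
	}
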
diff --git a/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go b/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go
index 716492fea..dc37717ef 100644
--- a/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go
+++ b/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go
@@ -25,18 +25,12 @@ import (
io "io"
proto "github.com/gogo/protobuf/proto"
- github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
- v11 "k8s.io/api/core/v1"
- resource "k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- runtime "k8s.io/apimachinery/pkg/runtime"
math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
-
- k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
)
// Reference imports to suppress errors if they are not otherwise used.
@@ -50,15 +44,15 @@ var _ = math.Inf
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-func (m *AllocatedDeviceStatus) Reset() { *m = AllocatedDeviceStatus{} }
-func (*AllocatedDeviceStatus) ProtoMessage() {}
-func (*AllocatedDeviceStatus) Descriptor() ([]byte, []int) {
+func (m *CELDeviceSelector) Reset() { *m = CELDeviceSelector{} }
+func (*CELDeviceSelector) ProtoMessage() {}
+func (*CELDeviceSelector) Descriptor() ([]byte, []int) {
return fileDescriptor_66649ee9bbcd89d2, []int{0}
}
-func (m *AllocatedDeviceStatus) XXX_Unmarshal(b []byte) error {
+func (m *CELDeviceSelector) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
-func (m *AllocatedDeviceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+func (m *CELDeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
@@ -66,27 +60,27 @@ func (m *AllocatedDeviceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byt
}
return b[:n], nil
}
-func (m *AllocatedDeviceStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AllocatedDeviceStatus.Merge(m, src)
+func (m *CELDeviceSelector) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CELDeviceSelector.Merge(m, src)
}
-func (m *AllocatedDeviceStatus) XXX_Size() int {
+func (m *CELDeviceSelector) XXX_Size() int {
return m.Size()
}
-func (m *AllocatedDeviceStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_AllocatedDeviceStatus.DiscardUnknown(m)
+func (m *CELDeviceSelector) XXX_DiscardUnknown() {
+ xxx_messageInfo_CELDeviceSelector.DiscardUnknown(m)
}
-var xxx_messageInfo_AllocatedDeviceStatus proto.InternalMessageInfo
+var xxx_messageInfo_CELDeviceSelector proto.InternalMessageInfo
-func (m *AllocationResult) Reset() { *m = AllocationResult{} }
-func (*AllocationResult) ProtoMessage() {}
-func (*AllocationResult) Descriptor() ([]byte, []int) {
+func (m *DeviceSelector) Reset() { *m = DeviceSelector{} }
+func (*DeviceSelector) ProtoMessage() {}
+func (*DeviceSelector) Descriptor() ([]byte, []int) {
return fileDescriptor_66649ee9bbcd89d2, []int{1}
}
-func (m *AllocationResult) XXX_Unmarshal(b []byte) error {
+func (m *DeviceSelector) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
-func (m *AllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+func (m *DeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
@@ -94,27 +88,27 @@ func (m *AllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, er
}
return b[:n], nil
}
-func (m *AllocationResult) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AllocationResult.Merge(m, src)
+func (m *DeviceSelector) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeviceSelector.Merge(m, src)
}
-func (m *AllocationResult) XXX_Size() int {
+func (m *DeviceSelector) XXX_Size() int {
return m.Size()
}
-func (m *AllocationResult) XXX_DiscardUnknown() {
- xxx_messageInfo_AllocationResult.DiscardUnknown(m)
+func (m *DeviceSelector) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeviceSelector.DiscardUnknown(m)
}
-var xxx_messageInfo_AllocationResult proto.InternalMessageInfo
+var xxx_messageInfo_DeviceSelector proto.InternalMessageInfo
-func (m *BasicDevice) Reset() { *m = BasicDevice{} }
-func (*BasicDevice) ProtoMessage() {}
-func (*BasicDevice) Descriptor() ([]byte, []int) {
+func (m *DeviceTaint) Reset() { *m = DeviceTaint{} }
+func (*DeviceTaint) ProtoMessage() {}
+func (*DeviceTaint) Descriptor() ([]byte, []int) {
return fileDescriptor_66649ee9bbcd89d2, []int{2}
}
-func (m *BasicDevice) XXX_Unmarshal(b []byte) error {
+func (m *DeviceTaint) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
-func (m *BasicDevice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+func (m *DeviceTaint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
@@ -122,27 +116,27 @@ func (m *BasicDevice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
}
return b[:n], nil
}
-func (m *BasicDevice) XXX_Merge(src proto.Message) {
- xxx_messageInfo_BasicDevice.Merge(m, src)
+func (m *DeviceTaint) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeviceTaint.Merge(m, src)
}
-func (m *BasicDevice) XXX_Size() int {
+func (m *DeviceTaint) XXX_Size() int {
return m.Size()
}
-func (m *BasicDevice) XXX_DiscardUnknown() {
- xxx_messageInfo_BasicDevice.DiscardUnknown(m)
+func (m *DeviceTaint) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeviceTaint.DiscardUnknown(m)
}
-var xxx_messageInfo_BasicDevice proto.InternalMessageInfo
+var xxx_messageInfo_DeviceTaint proto.InternalMessageInfo
-func (m *CELDeviceSelector) Reset() { *m = CELDeviceSelector{} }
-func (*CELDeviceSelector) ProtoMessage() {}
-func (*CELDeviceSelector) Descriptor() ([]byte, []int) {
+func (m *DeviceTaintRule) Reset() { *m = DeviceTaintRule{} }
+func (*DeviceTaintRule) ProtoMessage() {}
+func (*DeviceTaintRule) Descriptor() ([]byte, []int) {
return fileDescriptor_66649ee9bbcd89d2, []int{3}
}
-func (m *CELDeviceSelector) XXX_Unmarshal(b []byte) error {
+func (m *DeviceTaintRule) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
-func (m *CELDeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+func (m *DeviceTaintRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
@@ -150,27 +144,27 @@ func (m *CELDeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, e
}
return b[:n], nil
}
-func (m *CELDeviceSelector) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CELDeviceSelector.Merge(m, src)
+func (m *DeviceTaintRule) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeviceTaintRule.Merge(m, src)
}
-func (m *CELDeviceSelector) XXX_Size() int {
+func (m *DeviceTaintRule) XXX_Size() int {
return m.Size()
}
-func (m *CELDeviceSelector) XXX_DiscardUnknown() {
- xxx_messageInfo_CELDeviceSelector.DiscardUnknown(m)
+func (m *DeviceTaintRule) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeviceTaintRule.DiscardUnknown(m)
}
-var xxx_messageInfo_CELDeviceSelector proto.InternalMessageInfo
+var xxx_messageInfo_DeviceTaintRule proto.InternalMessageInfo
-func (m *Counter) Reset() { *m = Counter{} }
-func (*Counter) ProtoMessage() {}
-func (*Counter) Descriptor() ([]byte, []int) {
+func (m *DeviceTaintRuleList) Reset() { *m = DeviceTaintRuleList{} }
+func (*DeviceTaintRuleList) ProtoMessage() {}
+func (*DeviceTaintRuleList) Descriptor() ([]byte, []int) {
return fileDescriptor_66649ee9bbcd89d2, []int{4}
}
-func (m *Counter) XXX_Unmarshal(b []byte) error {
+func (m *DeviceTaintRuleList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
-func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+func (m *DeviceTaintRuleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
@@ -178,27 +172,27 @@ func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
}
return b[:n], nil
}
-func (m *Counter) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Counter.Merge(m, src)
+func (m *DeviceTaintRuleList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeviceTaintRuleList.Merge(m, src)
}
-func (m *Counter) XXX_Size() int {
+func (m *DeviceTaintRuleList) XXX_Size() int {
return m.Size()
}
-func (m *Counter) XXX_DiscardUnknown() {
- xxx_messageInfo_Counter.DiscardUnknown(m)
+func (m *DeviceTaintRuleList) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeviceTaintRuleList.DiscardUnknown(m)
}
-var xxx_messageInfo_Counter proto.InternalMessageInfo
+var xxx_messageInfo_DeviceTaintRuleList proto.InternalMessageInfo
-func (m *CounterSet) Reset() { *m = CounterSet{} }
-func (*CounterSet) ProtoMessage() {}
-func (*CounterSet) Descriptor() ([]byte, []int) {
+func (m *DeviceTaintRuleSpec) Reset() { *m = DeviceTaintRuleSpec{} }
+func (*DeviceTaintRuleSpec) ProtoMessage() {}
+func (*DeviceTaintRuleSpec) Descriptor() ([]byte, []int) {
return fileDescriptor_66649ee9bbcd89d2, []int{5}
}
-func (m *CounterSet) XXX_Unmarshal(b []byte) error {
+func (m *DeviceTaintRuleSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
-func (m *CounterSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+func (m *DeviceTaintRuleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
@@ -206,27 +200,27 @@ func (m *CounterSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
}
return b[:n], nil
}
-func (m *CounterSet) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CounterSet.Merge(m, src)
+func (m *DeviceTaintRuleSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeviceTaintRuleSpec.Merge(m, src)
}
-func (m *CounterSet) XXX_Size() int {
+func (m *DeviceTaintRuleSpec) XXX_Size() int {
return m.Size()
}
-func (m *CounterSet) XXX_DiscardUnknown() {
- xxx_messageInfo_CounterSet.DiscardUnknown(m)
+func (m *DeviceTaintRuleSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeviceTaintRuleSpec.DiscardUnknown(m)
}
-var xxx_messageInfo_CounterSet proto.InternalMessageInfo
+var xxx_messageInfo_DeviceTaintRuleSpec proto.InternalMessageInfo
-func (m *Device) Reset() { *m = Device{} }
-func (*Device) ProtoMessage() {}
-func (*Device) Descriptor() ([]byte, []int) {
+func (m *DeviceTaintSelector) Reset() { *m = DeviceTaintSelector{} }
+func (*DeviceTaintSelector) ProtoMessage() {}
+func (*DeviceTaintSelector) Descriptor() ([]byte, []int) {
return fileDescriptor_66649ee9bbcd89d2, []int{6}
}
-func (m *Device) XXX_Unmarshal(b []byte) error {
+func (m *DeviceTaintSelector) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
-func (m *Device) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+func (m *DeviceTaintSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
@@ -234,10718 +228,616 @@ func (m *Device) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
}
return b[:n], nil
}
-func (m *Device) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Device.Merge(m, src)
+func (m *DeviceTaintSelector) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeviceTaintSelector.Merge(m, src)
}
-func (m *Device) XXX_Size() int {
+func (m *DeviceTaintSelector) XXX_Size() int {
return m.Size()
}
-func (m *Device) XXX_DiscardUnknown() {
- xxx_messageInfo_Device.DiscardUnknown(m)
+func (m *DeviceTaintSelector) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeviceTaintSelector.DiscardUnknown(m)
}
-var xxx_messageInfo_Device proto.InternalMessageInfo
+var xxx_messageInfo_DeviceTaintSelector proto.InternalMessageInfo
-func (m *DeviceAllocationConfiguration) Reset() { *m = DeviceAllocationConfiguration{} }
-func (*DeviceAllocationConfiguration) ProtoMessage() {}
-func (*DeviceAllocationConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{7}
-}
-func (m *DeviceAllocationConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceAllocationConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceAllocationConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceAllocationConfiguration.Merge(m, src)
-}
-func (m *DeviceAllocationConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceAllocationConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceAllocationConfiguration.DiscardUnknown(m)
+func init() {
+ proto.RegisterType((*CELDeviceSelector)(nil), "k8s.io.api.resource.v1alpha3.CELDeviceSelector")
+ proto.RegisterType((*DeviceSelector)(nil), "k8s.io.api.resource.v1alpha3.DeviceSelector")
+ proto.RegisterType((*DeviceTaint)(nil), "k8s.io.api.resource.v1alpha3.DeviceTaint")
+ proto.RegisterType((*DeviceTaintRule)(nil), "k8s.io.api.resource.v1alpha3.DeviceTaintRule")
+ proto.RegisterType((*DeviceTaintRuleList)(nil), "k8s.io.api.resource.v1alpha3.DeviceTaintRuleList")
+ proto.RegisterType((*DeviceTaintRuleSpec)(nil), "k8s.io.api.resource.v1alpha3.DeviceTaintRuleSpec")
+ proto.RegisterType((*DeviceTaintSelector)(nil), "k8s.io.api.resource.v1alpha3.DeviceTaintSelector")
}
-var xxx_messageInfo_DeviceAllocationConfiguration proto.InternalMessageInfo
-
-func (m *DeviceAllocationResult) Reset() { *m = DeviceAllocationResult{} }
-func (*DeviceAllocationResult) ProtoMessage() {}
-func (*DeviceAllocationResult) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{8}
+func init() {
+ proto.RegisterFile("k8s.io/api/resource/v1alpha3/generated.proto", fileDescriptor_66649ee9bbcd89d2)
}
-func (m *DeviceAllocationResult) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
+
+var fileDescriptor_66649ee9bbcd89d2 = []byte{
+ // 716 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xbf, 0x6f, 0xd3, 0x40,
+ 0x14, 0x8e, 0x9b, 0xa4, 0x24, 0x17, 0xda, 0xd2, 0xeb, 0x12, 0x55, 0xc5, 0xae, 0xdc, 0xa5, 0xa0,
+ 0xd6, 0x26, 0x01, 0x21, 0x04, 0x62, 0x20, 0x6d, 0x84, 0x80, 0x52, 0xd0, 0xb5, 0x02, 0x09, 0x15,
+ 0x89, 0xab, 0xf3, 0x9a, 0x98, 0xd8, 0xb1, 0xe5, 0x73, 0x22, 0xba, 0xf5, 0x4f, 0x60, 0x84, 0x8d,
+ 0xff, 0x86, 0x8c, 0x1d, 0x18, 0x3a, 0xa0, 0x88, 0x9a, 0xbf, 0x80, 0x95, 0x09, 0xdd, 0xf9, 0x12,
+ 0xa7, 0x8e, 0x28, 0x61, 0x8b, 0xbf, 0xfb, 0xde, 0xf7, 0xde, 0xf7, 0x7e, 0x28, 0x68, 0xa3, 0x7d,
+ 0x8f, 0x19, 0xb6, 0x67, 0x52, 0xdf, 0x36, 0x03, 0x60, 0x5e, 0x37, 0xb0, 0xc0, 0xec, 0x55, 0xa8,
+ 0xe3, 0xb7, 0xe8, 0x6d, 0xb3, 0x09, 0x1d, 0x08, 0x68, 0x08, 0x0d, 0xc3, 0x0f, 0xbc, 0xd0, 0xc3,
+ 0x2b, 0x31, 0xdb, 0xa0, 0xbe, 0x6d, 0x0c, 0xd9, 0xc6, 0x90, 0xbd, 0xbc, 0xd9, 0xb4, 0xc3, 0x56,
+ 0xf7, 0xd0, 0xb0, 0x3c, 0xd7, 0x6c, 0x7a, 0x4d, 0xcf, 0x14, 0x41, 0x87, 0xdd, 0x23, 0xf1, 0x25,
+ 0x3e, 0xc4, 0xaf, 0x58, 0x6c, 0xf9, 0x4e, 0x92, 0xda, 0xa5, 0x56, 0xcb, 0xee, 0x40, 0x70, 0x6c,
+ 0xfa, 0xed, 0x26, 0x07, 0x98, 0xe9, 0x42, 0x48, 0xcd, 0x5e, 0x25, 0x5d, 0xc2, 0xb2, 0xf9, 0xb7,
+ 0xa8, 0xa0, 0xdb, 0x09, 0x6d, 0x17, 0x26, 0x02, 0xee, 0xfe, 0x2b, 0x80, 0x59, 0x2d, 0x70, 0x69,
+ 0x3a, 0x4e, 0x7f, 0x8c, 0x16, 0xb7, 0xea, 0x3b, 0xdb, 0xd0, 0xb3, 0x2d, 0xd8, 0x03, 0x07, 0xac,
+ 0xd0, 0x0b, 0x70, 0x15, 0x21, 0xf8, 0xe0, 0x07, 0xc0, 0x98, 0xed, 0x75, 0xca, 0xca, 0xaa, 0xb2,
+ 0x5e, 0xac, 0xe1, 0xfe, 0x40, 0xcb, 0x44, 0x03, 0x0d, 0xd5, 0x47, 0x2f, 0x64, 0x8c, 0xa5, 0x1f,
+ 0xa0, 0xf9, 0x94, 0xca, 0x53, 0x94, 0xb5, 0xc0, 0x11, 0xe1, 0xa5, 0xaa, 0x69, 0x5c, 0xd6, 0x54,
+ 0x63, 0xa2, 0x86, 0xda, 0x95, 0x68, 0xa0, 0x65, 0xb7, 0xea, 0x3b, 0x84, 0x8b, 0xe8, 0xbf, 0x14,
+ 0x54, 0x8a, 0x09, 0xfb, 0xd4, 0xee, 0x84, 0xf8, 0x3a, 0xca, 0xb6, 0xe1, 0x58, 0x96, 0x56, 0x92,
+ 0xa5, 0x65, 0x9f, 0xc1, 0x31, 0xe1, 0x38, 0x5e, 0x43, 0xf9, 0x1e, 0x75, 0xba, 0x50, 0x9e, 0x11,
+ 0x84, 0x39, 0x49, 0xc8, 0xbf, 0xe2, 0x20, 0x89, 0xdf, 0xf0, 0x03, 0x34, 0x0b, 0x47, 0x47, 0x60,
+ 0x85, 0xe5, 0xac, 0x60, 0xad, 0x49, 0xd6, 0x6c, 0x5d, 0xa0, 0xbf, 0x07, 0xda, 0xe2, 0x58, 0xca,
+ 0x18, 0x24, 0x32, 0x04, 0xbf, 0x46, 0x45, 0xde, 0xd6, 0x47, 0x8d, 0x06, 0x34, 0xca, 0x39, 0x61,
+ 0xf1, 0xe6, 0x98, 0xc5, 0xd1, 0x0c, 0x0c, 0xbf, 0xdd, 0xe4, 0x00, 0x33, 0xf8, 0xa8, 0x8d, 0x5e,
+ 0xc5, 0xd8, 0xb7, 0x5d, 0xa8, 0xcd, 0x45, 0x03, 0xad, 0xb8, 0x3f, 0x14, 0x20, 0x89, 0xd6, 0xfd,
+ 0xc2, 0xa7, 0x2f, 0x5a, 0xe6, 0xe4, 0xfb, 0x6a, 0x46, 0xef, 0x2b, 0x68, 0x61, 0xac, 0x00, 0xd2,
+ 0x75, 0x00, 0xbf, 0x43, 0x05, 0xae, 0xd3, 0xa0, 0x21, 0x95, 0x8d, 0xbd, 0x35, 0x5d, 0xd6, 0x17,
+ 0x87, 0xef, 0xc1, 0x0a, 0x9f, 0x43, 0x48, 0x93, 0x49, 0x26, 0x18, 0x19, 0xa9, 0xe2, 0x3d, 0x94,
+ 0x63, 0x3e, 0x58, 0xa2, 0x73, 0xa5, 0x6a, 0xe5, 0xf2, 0xb1, 0xa5, 0xca, 0xdb, 0xf3, 0xc1, 0xaa,
+ 0x5d, 0x95, 0xf2, 0x39, 0xfe, 0x45, 0x84, 0x98, 0xfe, 0x55, 0x41, 0x4b, 0x29, 0xee, 0x8e, 0xcd,
+ 0x42, 0x7c, 0x30, 0x61, 0xc7, 0x98, 0xce, 0x0e, 0x8f, 0x16, 0x66, 0xae, 0xc9, 0x6c, 0x85, 0x21,
+ 0x32, 0x66, 0x85, 0xa0, 0xbc, 0x1d, 0x82, 0xcb, 0xca, 0x33, 0xab, 0xd9, 0xf5, 0x52, 0x75, 0xf3,
+ 0xbf, 0xbc, 0x24, 0x4b, 0xf3, 0x84, 0x6b, 0x90, 0x58, 0x4a, 0xff, 0x36, 0xe9, 0x84, 0xfb, 0xc4,
+ 0x2e, 0x9a, 0x6f, 0x5c, 0x58, 0x60, 0xe9, 0x67, 0xfa, 0x06, 0x8e, 0x36, 0x1f, 0x47, 0x03, 0x2d,
+ 0x75, 0x4b, 0x24, 0x25, 0x8e, 0x77, 0x51, 0x3e, 0xe4, 0x41, 0x72, 0x4c, 0x37, 0xa6, 0xce, 0x92,
+ 0xd8, 0x8a, 0xeb, 0x8f, 0x65, 0xf4, 0xcf, 0x33, 0x17, 0x6c, 0x8d, 0xf2, 0x3c, 0x44, 0x0b, 0x71,
+ 0xe6, 0x2d, 0x87, 0x32, 0xb6, 0x4b, 0x5d, 0x90, 0x37, 0xb7, 0x14, 0x0d, 0x34, 0xb9, 0x9d, 0xa3,
+ 0x27, 0x92, 0xe6, 0x62, 0x1d, 0xcd, 0x36, 0x02, 0xbb, 0x07, 0x81, 0x3c, 0x44, 0xc4, 0xcf, 0x6b,
+ 0x5b, 0x20, 0x44, 0xbe, 0xe0, 0x15, 0x94, 0xf3, 0x3d, 0xcf, 0x91, 0x47, 0x58, 0xe0, 0x9b, 0xf3,
+ 0xd2, 0xf3, 0x1c, 0x22, 0x50, 0xa1, 0x20, 0x44, 0xc5, 0x91, 0x0d, 0x15, 0x04, 0x42, 0xe4, 0x0b,
+ 0x7e, 0x8b, 0x8a, 0x4c, 0x16, 0xcc, 0xca, 0x79, 0x31, 0xeb, 0x8d, 0x69, 0x1a, 0x32, 0xea, 0xf8,
+ 0xa2, 0xec, 0x49, 0x71, 0x88, 0x30, 0x92, 0x28, 0xd6, 0x6a, 0xfd, 0x73, 0x35, 0x73, 0x7a, 0xae,
+ 0x66, 0xce, 0xce, 0xd5, 0xcc, 0x49, 0xa4, 0x2a, 0xfd, 0x48, 0x55, 0x4e, 0x23, 0x55, 0x39, 0x8b,
+ 0x54, 0xe5, 0x47, 0xa4, 0x2a, 0x1f, 0x7f, 0xaa, 0x99, 0x37, 0x2b, 0x97, 0xfd, 0xc5, 0xfc, 0x09,
+ 0x00, 0x00, 0xff, 0xff, 0x7e, 0xb1, 0x06, 0x7b, 0x81, 0x06, 0x00, 0x00,
}
-func (m *DeviceAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
+
+func (m *CELDeviceSelector) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
- return b[:n], nil
-}
-func (m *DeviceAllocationResult) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceAllocationResult.Merge(m, src)
-}
-func (m *DeviceAllocationResult) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceAllocationResult) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceAllocationResult.DiscardUnknown(m)
+ return dAtA[:n], nil
}
-var xxx_messageInfo_DeviceAllocationResult proto.InternalMessageInfo
-
-func (m *DeviceAttribute) Reset() { *m = DeviceAttribute{} }
-func (*DeviceAttribute) ProtoMessage() {}
-func (*DeviceAttribute) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{9}
+func (m *CELDeviceSelector) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *DeviceAttribute) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
+
+func (m *CELDeviceSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Expression)
+ copy(dAtA[i:], m.Expression)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
-func (m *DeviceAttribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
+
+func (m *DeviceSelector) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
- return b[:n], nil
-}
-func (m *DeviceAttribute) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceAttribute.Merge(m, src)
-}
-func (m *DeviceAttribute) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceAttribute) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceAttribute.DiscardUnknown(m)
+ return dAtA[:n], nil
}
-var xxx_messageInfo_DeviceAttribute proto.InternalMessageInfo
-
-func (m *DeviceClaim) Reset() { *m = DeviceClaim{} }
-func (*DeviceClaim) ProtoMessage() {}
-func (*DeviceClaim) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{10}
-}
-func (m *DeviceClaim) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
+func (m *DeviceSelector) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *DeviceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
+
+func (m *DeviceSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.CEL != nil {
+ {
+ size, err := m.CEL.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
}
- return b[:n], nil
-}
-func (m *DeviceClaim) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceClaim.Merge(m, src)
-}
-func (m *DeviceClaim) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceClaim) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceClaim.DiscardUnknown(m)
+ return len(dAtA) - i, nil
}
-var xxx_messageInfo_DeviceClaim proto.InternalMessageInfo
-
-func (m *DeviceClaimConfiguration) Reset() { *m = DeviceClaimConfiguration{} }
-func (*DeviceClaimConfiguration) ProtoMessage() {}
-func (*DeviceClaimConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{11}
-}
-func (m *DeviceClaimConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceClaimConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
+func (m *DeviceTaint) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
- return b[:n], nil
-}
-func (m *DeviceClaimConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceClaimConfiguration.Merge(m, src)
-}
-func (m *DeviceClaimConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceClaimConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceClaimConfiguration.DiscardUnknown(m)
+ return dAtA[:n], nil
}
-var xxx_messageInfo_DeviceClaimConfiguration proto.InternalMessageInfo
-
-func (m *DeviceClass) Reset() { *m = DeviceClass{} }
-func (*DeviceClass) ProtoMessage() {}
-func (*DeviceClass) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{12}
+func (m *DeviceTaint) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *DeviceClass) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
+
+func (m *DeviceTaint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.TimeAdded != nil {
+ {
+ size, err := m.TimeAdded.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ i -= len(m.Effect)
+ copy(dAtA[i:], m.Effect)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Value)
+ copy(dAtA[i:], m.Value)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
-func (m *DeviceClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
+
+func (m *DeviceTaintRule) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
- return b[:n], nil
-}
-func (m *DeviceClass) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceClass.Merge(m, src)
-}
-func (m *DeviceClass) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceClass) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceClass.DiscardUnknown(m)
+ return dAtA[:n], nil
}
-var xxx_messageInfo_DeviceClass proto.InternalMessageInfo
-
-func (m *DeviceClassConfiguration) Reset() { *m = DeviceClassConfiguration{} }
-func (*DeviceClassConfiguration) ProtoMessage() {}
-func (*DeviceClassConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{13}
-}
-func (m *DeviceClassConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceClassConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceClassConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceClassConfiguration.Merge(m, src)
-}
-func (m *DeviceClassConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceClassConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceClassConfiguration.DiscardUnknown(m)
+func (m *DeviceTaintRule) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
}
-var xxx_messageInfo_DeviceClassConfiguration proto.InternalMessageInfo
-
-func (m *DeviceClassList) Reset() { *m = DeviceClassList{} }
-func (*DeviceClassList) ProtoMessage() {}
-func (*DeviceClassList) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{14}
-}
-func (m *DeviceClassList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
+func (m *DeviceTaintRule) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- return b[:n], nil
-}
-func (m *DeviceClassList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceClassList.Merge(m, src)
-}
-func (m *DeviceClassList) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceClassList) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceClassList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceClassList proto.InternalMessageInfo
-
-func (m *DeviceClassSpec) Reset() { *m = DeviceClassSpec{} }
-func (*DeviceClassSpec) ProtoMessage() {}
-func (*DeviceClassSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{15}
-}
-func (m *DeviceClassSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceClassSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- return b[:n], nil
-}
-func (m *DeviceClassSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceClassSpec.Merge(m, src)
-}
-func (m *DeviceClassSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceClassSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceClassSpec.DiscardUnknown(m)
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
-var xxx_messageInfo_DeviceClassSpec proto.InternalMessageInfo
-
-func (m *DeviceConfiguration) Reset() { *m = DeviceConfiguration{} }
-func (*DeviceConfiguration) ProtoMessage() {}
-func (*DeviceConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{16}
-}
-func (m *DeviceConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
+func (m *DeviceTaintRuleList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
- return b[:n], nil
-}
-func (m *DeviceConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceConfiguration.Merge(m, src)
-}
-func (m *DeviceConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceConfiguration.DiscardUnknown(m)
+ return dAtA[:n], nil
}
-var xxx_messageInfo_DeviceConfiguration proto.InternalMessageInfo
-
-func (m *DeviceConstraint) Reset() { *m = DeviceConstraint{} }
-func (*DeviceConstraint) ProtoMessage() {}
-func (*DeviceConstraint) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{17}
-}
-func (m *DeviceConstraint) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceConstraint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceConstraint) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceConstraint.Merge(m, src)
-}
-func (m *DeviceConstraint) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceConstraint) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceConstraint.DiscardUnknown(m)
+func (m *DeviceTaintRuleList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
}
-var xxx_messageInfo_DeviceConstraint proto.InternalMessageInfo
-
-func (m *DeviceCounterConsumption) Reset() { *m = DeviceCounterConsumption{} }
-func (*DeviceCounterConsumption) ProtoMessage() {}
-func (*DeviceCounterConsumption) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{18}
-}
-func (m *DeviceCounterConsumption) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceCounterConsumption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
+func (m *DeviceTaintRuleList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
}
- return b[:n], nil
-}
-func (m *DeviceCounterConsumption) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceCounterConsumption.Merge(m, src)
-}
-func (m *DeviceCounterConsumption) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceCounterConsumption) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceCounterConsumption.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceCounterConsumption proto.InternalMessageInfo
-
-func (m *DeviceRequest) Reset() { *m = DeviceRequest{} }
-func (*DeviceRequest) ProtoMessage() {}
-func (*DeviceRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{19}
-}
-func (m *DeviceRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- return b[:n], nil
-}
-func (m *DeviceRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceRequest.Merge(m, src)
-}
-func (m *DeviceRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceRequest.DiscardUnknown(m)
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
-var xxx_messageInfo_DeviceRequest proto.InternalMessageInfo
-
-func (m *DeviceRequestAllocationResult) Reset() { *m = DeviceRequestAllocationResult{} }
-func (*DeviceRequestAllocationResult) ProtoMessage() {}
-func (*DeviceRequestAllocationResult) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{20}
-}
-func (m *DeviceRequestAllocationResult) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceRequestAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
+func (m *DeviceTaintRuleSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
- return b[:n], nil
-}
-func (m *DeviceRequestAllocationResult) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceRequestAllocationResult.Merge(m, src)
-}
-func (m *DeviceRequestAllocationResult) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceRequestAllocationResult) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceRequestAllocationResult.DiscardUnknown(m)
+ return dAtA[:n], nil
}
-var xxx_messageInfo_DeviceRequestAllocationResult proto.InternalMessageInfo
-
-func (m *DeviceSelector) Reset() { *m = DeviceSelector{} }
-func (*DeviceSelector) ProtoMessage() {}
-func (*DeviceSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{21}
-}
-func (m *DeviceSelector) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
+func (m *DeviceTaintRuleSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *DeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
+
+func (m *DeviceTaintRuleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Taint.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- return b[:n], nil
-}
-func (m *DeviceSelector) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceSelector.Merge(m, src)
-}
-func (m *DeviceSelector) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceSelector) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceSelector.DiscardUnknown(m)
+ i--
+ dAtA[i] = 0x12
+ if m.DeviceSelector != nil {
+ {
+ size, err := m.DeviceSelector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
}
-var xxx_messageInfo_DeviceSelector proto.InternalMessageInfo
-
-func (m *DeviceSubRequest) Reset() { *m = DeviceSubRequest{} }
-func (*DeviceSubRequest) ProtoMessage() {}
-func (*DeviceSubRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{22}
-}
-func (m *DeviceSubRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceSubRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
+func (m *DeviceTaintSelector) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
- return b[:n], nil
-}
-func (m *DeviceSubRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceSubRequest.Merge(m, src)
-}
-func (m *DeviceSubRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceSubRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceSubRequest.DiscardUnknown(m)
+ return dAtA[:n], nil
}
-var xxx_messageInfo_DeviceSubRequest proto.InternalMessageInfo
-
-func (m *DeviceTaint) Reset() { *m = DeviceTaint{} }
-func (*DeviceTaint) ProtoMessage() {}
-func (*DeviceTaint) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{23}
+func (m *DeviceTaintSelector) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *DeviceTaint) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceTaint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceTaint) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceTaint.Merge(m, src)
-}
-func (m *DeviceTaint) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceTaint) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceTaint.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceTaint proto.InternalMessageInfo
-func (m *DeviceTaintRule) Reset() { *m = DeviceTaintRule{} }
-func (*DeviceTaintRule) ProtoMessage() {}
-func (*DeviceTaintRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{24}
-}
-func (m *DeviceTaintRule) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceTaintRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
+func (m *DeviceTaintSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Selectors) > 0 {
+ for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
}
- return b[:n], nil
-}
-func (m *DeviceTaintRule) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceTaintRule.Merge(m, src)
-}
-func (m *DeviceTaintRule) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceTaintRule) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceTaintRule.DiscardUnknown(m)
+ if m.Device != nil {
+ i -= len(*m.Device)
+ copy(dAtA[i:], *m.Device)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Device)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.Pool != nil {
+ i -= len(*m.Pool)
+ copy(dAtA[i:], *m.Pool)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Pool)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Driver != nil {
+ i -= len(*m.Driver)
+ copy(dAtA[i:], *m.Driver)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Driver)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.DeviceClassName != nil {
+ i -= len(*m.DeviceClassName)
+ copy(dAtA[i:], *m.DeviceClassName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DeviceClassName)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
}
-var xxx_messageInfo_DeviceTaintRule proto.InternalMessageInfo
-
-func (m *DeviceTaintRuleList) Reset() { *m = DeviceTaintRuleList{} }
-func (*DeviceTaintRuleList) ProtoMessage() {}
-func (*DeviceTaintRuleList) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{25}
-}
-func (m *DeviceTaintRuleList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceTaintRuleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
}
- return b[:n], nil
-}
-func (m *DeviceTaintRuleList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceTaintRuleList.Merge(m, src)
-}
-func (m *DeviceTaintRuleList) XXX_Size() int {
- return m.Size()
+ dAtA[offset] = uint8(v)
+ return base
}
-func (m *DeviceTaintRuleList) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceTaintRuleList.DiscardUnknown(m)
+func (m *CELDeviceSelector) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Expression)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
}
-var xxx_messageInfo_DeviceTaintRuleList proto.InternalMessageInfo
-
-func (m *DeviceTaintRuleSpec) Reset() { *m = DeviceTaintRuleSpec{} }
-func (*DeviceTaintRuleSpec) ProtoMessage() {}
-func (*DeviceTaintRuleSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{26}
-}
-func (m *DeviceTaintRuleSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceTaintRuleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
+func (m *DeviceSelector) Size() (n int) {
+ if m == nil {
+ return 0
}
- return b[:n], nil
-}
-func (m *DeviceTaintRuleSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceTaintRuleSpec.Merge(m, src)
-}
-func (m *DeviceTaintRuleSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceTaintRuleSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceTaintRuleSpec.DiscardUnknown(m)
+ var l int
+ _ = l
+ if m.CEL != nil {
+ l = m.CEL.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
}
-var xxx_messageInfo_DeviceTaintRuleSpec proto.InternalMessageInfo
-
-func (m *DeviceTaintSelector) Reset() { *m = DeviceTaintSelector{} }
-func (*DeviceTaintSelector) ProtoMessage() {}
-func (*DeviceTaintSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{27}
-}
-func (m *DeviceTaintSelector) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceTaintSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
+func (m *DeviceTaint) Size() (n int) {
+ if m == nil {
+ return 0
}
- return b[:n], nil
-}
-func (m *DeviceTaintSelector) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceTaintSelector.Merge(m, src)
-}
-func (m *DeviceTaintSelector) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceTaintSelector) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceTaintSelector.DiscardUnknown(m)
+ var l int
+ _ = l
+ l = len(m.Key)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Value)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Effect)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.TimeAdded != nil {
+ l = m.TimeAdded.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
}
-var xxx_messageInfo_DeviceTaintSelector proto.InternalMessageInfo
-
-func (m *DeviceToleration) Reset() { *m = DeviceToleration{} }
-func (*DeviceToleration) ProtoMessage() {}
-func (*DeviceToleration) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{28}
-}
-func (m *DeviceToleration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceToleration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
+func (m *DeviceTaintRule) Size() (n int) {
+ if m == nil {
+ return 0
}
- return b[:n], nil
-}
-func (m *DeviceToleration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceToleration.Merge(m, src)
-}
-func (m *DeviceToleration) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceToleration) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceToleration.DiscardUnknown(m)
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
}
-var xxx_messageInfo_DeviceToleration proto.InternalMessageInfo
-
-func (m *NetworkDeviceData) Reset() { *m = NetworkDeviceData{} }
-func (*NetworkDeviceData) ProtoMessage() {}
-func (*NetworkDeviceData) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{29}
-}
-func (m *NetworkDeviceData) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NetworkDeviceData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
+func (m *DeviceTaintRuleList) Size() (n int) {
+ if m == nil {
+ return 0
}
- return b[:n], nil
-}
-func (m *NetworkDeviceData) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NetworkDeviceData.Merge(m, src)
-}
-func (m *NetworkDeviceData) XXX_Size() int {
- return m.Size()
-}
-func (m *NetworkDeviceData) XXX_DiscardUnknown() {
- xxx_messageInfo_NetworkDeviceData.DiscardUnknown(m)
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
}
-var xxx_messageInfo_NetworkDeviceData proto.InternalMessageInfo
-
-func (m *OpaqueDeviceConfiguration) Reset() { *m = OpaqueDeviceConfiguration{} }
-func (*OpaqueDeviceConfiguration) ProtoMessage() {}
-func (*OpaqueDeviceConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{30}
-}
-func (m *OpaqueDeviceConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *OpaqueDeviceConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *OpaqueDeviceConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_OpaqueDeviceConfiguration.Merge(m, src)
-}
-func (m *OpaqueDeviceConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *OpaqueDeviceConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_OpaqueDeviceConfiguration.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_OpaqueDeviceConfiguration proto.InternalMessageInfo
-
-func (m *ResourceClaim) Reset() { *m = ResourceClaim{} }
-func (*ResourceClaim) ProtoMessage() {}
-func (*ResourceClaim) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{31}
-}
-func (m *ResourceClaim) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceClaim) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceClaim.Merge(m, src)
-}
-func (m *ResourceClaim) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceClaim) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceClaim.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo
-
-func (m *ResourceClaimConsumerReference) Reset() { *m = ResourceClaimConsumerReference{} }
-func (*ResourceClaimConsumerReference) ProtoMessage() {}
-func (*ResourceClaimConsumerReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{32}
-}
-func (m *ResourceClaimConsumerReference) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceClaimConsumerReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
+func (m *DeviceTaintRuleSpec) Size() (n int) {
+ if m == nil {
+ return 0
}
- return b[:n], nil
-}
-func (m *ResourceClaimConsumerReference) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceClaimConsumerReference.Merge(m, src)
-}
-func (m *ResourceClaimConsumerReference) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceClaimConsumerReference) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceClaimConsumerReference.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceClaimConsumerReference proto.InternalMessageInfo
-
-func (m *ResourceClaimList) Reset() { *m = ResourceClaimList{} }
-func (*ResourceClaimList) ProtoMessage() {}
-func (*ResourceClaimList) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{33}
-}
-func (m *ResourceClaimList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceClaimList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
+ var l int
+ _ = l
+ if m.DeviceSelector != nil {
+ l = m.DeviceSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
}
- return b[:n], nil
-}
-func (m *ResourceClaimList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceClaimList.Merge(m, src)
-}
-func (m *ResourceClaimList) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceClaimList) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceClaimList.DiscardUnknown(m)
+ l = m.Taint.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
}
-var xxx_messageInfo_ResourceClaimList proto.InternalMessageInfo
-
-func (m *ResourceClaimSpec) Reset() { *m = ResourceClaimSpec{} }
-func (*ResourceClaimSpec) ProtoMessage() {}
-func (*ResourceClaimSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{34}
-}
-func (m *ResourceClaimSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceClaimSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
+func (m *DeviceTaintSelector) Size() (n int) {
+ if m == nil {
+ return 0
}
- return b[:n], nil
-}
-func (m *ResourceClaimSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceClaimSpec.Merge(m, src)
-}
-func (m *ResourceClaimSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceClaimSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceClaimSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceClaimSpec proto.InternalMessageInfo
-
-func (m *ResourceClaimStatus) Reset() { *m = ResourceClaimStatus{} }
-func (*ResourceClaimStatus) ProtoMessage() {}
-func (*ResourceClaimStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{35}
-}
-func (m *ResourceClaimStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
+ var l int
+ _ = l
+ if m.DeviceClassName != nil {
+ l = len(*m.DeviceClassName)
+ n += 1 + l + sovGenerated(uint64(l))
}
- return b[:n], nil
-}
-func (m *ResourceClaimStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceClaimStatus.Merge(m, src)
-}
-func (m *ResourceClaimStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceClaimStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceClaimStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceClaimStatus proto.InternalMessageInfo
-
-func (m *ResourceClaimTemplate) Reset() { *m = ResourceClaimTemplate{} }
-func (*ResourceClaimTemplate) ProtoMessage() {}
-func (*ResourceClaimTemplate) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{36}
-}
-func (m *ResourceClaimTemplate) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceClaimTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
+ if m.Driver != nil {
+ l = len(*m.Driver)
+ n += 1 + l + sovGenerated(uint64(l))
}
- return b[:n], nil
-}
-func (m *ResourceClaimTemplate) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceClaimTemplate.Merge(m, src)
-}
-func (m *ResourceClaimTemplate) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceClaimTemplate) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceClaimTemplate.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceClaimTemplate proto.InternalMessageInfo
-
-func (m *ResourceClaimTemplateList) Reset() { *m = ResourceClaimTemplateList{} }
-func (*ResourceClaimTemplateList) ProtoMessage() {}
-func (*ResourceClaimTemplateList) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{37}
-}
-func (m *ResourceClaimTemplateList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceClaimTemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
+ if m.Pool != nil {
+ l = len(*m.Pool)
+ n += 1 + l + sovGenerated(uint64(l))
}
- return b[:n], nil
-}
-func (m *ResourceClaimTemplateList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceClaimTemplateList.Merge(m, src)
-}
-func (m *ResourceClaimTemplateList) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceClaimTemplateList) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceClaimTemplateList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceClaimTemplateList proto.InternalMessageInfo
-
-func (m *ResourceClaimTemplateSpec) Reset() { *m = ResourceClaimTemplateSpec{} }
-func (*ResourceClaimTemplateSpec) ProtoMessage() {}
-func (*ResourceClaimTemplateSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{38}
-}
-func (m *ResourceClaimTemplateSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceClaimTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
+ if m.Device != nil {
+ l = len(*m.Device)
+ n += 1 + l + sovGenerated(uint64(l))
}
- return b[:n], nil
-}
-func (m *ResourceClaimTemplateSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceClaimTemplateSpec.Merge(m, src)
-}
-func (m *ResourceClaimTemplateSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceClaimTemplateSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceClaimTemplateSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceClaimTemplateSpec proto.InternalMessageInfo
-
-func (m *ResourcePool) Reset() { *m = ResourcePool{} }
-func (*ResourcePool) ProtoMessage() {}
-func (*ResourcePool) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{39}
-}
-func (m *ResourcePool) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourcePool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
+ if len(m.Selectors) > 0 {
+ for _, e := range m.Selectors {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
}
- return b[:n], nil
-}
-func (m *ResourcePool) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourcePool.Merge(m, src)
-}
-func (m *ResourcePool) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourcePool) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourcePool.DiscardUnknown(m)
+ return n
}
-var xxx_messageInfo_ResourcePool proto.InternalMessageInfo
-
-func (m *ResourceSlice) Reset() { *m = ResourceSlice{} }
-func (*ResourceSlice) ProtoMessage() {}
-func (*ResourceSlice) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{40}
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
}
-func (m *ResourceSlice) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
-func (m *ResourceSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
+func (this *CELDeviceSelector) String() string {
+ if this == nil {
+ return "nil"
}
- return b[:n], nil
-}
-func (m *ResourceSlice) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceSlice.Merge(m, src)
-}
-func (m *ResourceSlice) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceSlice) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceSlice.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceSlice proto.InternalMessageInfo
-
-func (m *ResourceSliceList) Reset() { *m = ResourceSliceList{} }
-func (*ResourceSliceList) ProtoMessage() {}
-func (*ResourceSliceList) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{41}
-}
-func (m *ResourceSliceList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
+ s := strings.Join([]string{`&CELDeviceSelector{`,
+ `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
+ `}`,
+ }, "")
+ return s
}
-func (m *ResourceSliceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
+func (this *DeviceSelector) String() string {
+ if this == nil {
+ return "nil"
}
- return b[:n], nil
-}
-func (m *ResourceSliceList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceSliceList.Merge(m, src)
-}
-func (m *ResourceSliceList) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceSliceList) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceSliceList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceSliceList proto.InternalMessageInfo
-
-func (m *ResourceSliceSpec) Reset() { *m = ResourceSliceSpec{} }
-func (*ResourceSliceSpec) ProtoMessage() {}
-func (*ResourceSliceSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{42}
-}
-func (m *ResourceSliceSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
+ s := strings.Join([]string{`&DeviceSelector{`,
+ `CEL:` + strings.Replace(this.CEL.String(), "CELDeviceSelector", "CELDeviceSelector", 1) + `,`,
+ `}`,
+ }, "")
+ return s
}
-func (m *ResourceSliceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
+func (this *DeviceTaintRule) String() string {
+ if this == nil {
+ return "nil"
}
- return b[:n], nil
-}
-func (m *ResourceSliceSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceSliceSpec.Merge(m, src)
-}
-func (m *ResourceSliceSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceSliceSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceSliceSpec.DiscardUnknown(m)
+ s := strings.Join([]string{`&DeviceTaintRule{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeviceTaintRuleSpec", "DeviceTaintRuleSpec", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
}
-
-var xxx_messageInfo_ResourceSliceSpec proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*AllocatedDeviceStatus)(nil), "k8s.io.api.resource.v1alpha3.AllocatedDeviceStatus")
- proto.RegisterType((*AllocationResult)(nil), "k8s.io.api.resource.v1alpha3.AllocationResult")
- proto.RegisterType((*BasicDevice)(nil), "k8s.io.api.resource.v1alpha3.BasicDevice")
- proto.RegisterMapType((map[QualifiedName]DeviceAttribute)(nil), "k8s.io.api.resource.v1alpha3.BasicDevice.AttributesEntry")
- proto.RegisterMapType((map[QualifiedName]resource.Quantity)(nil), "k8s.io.api.resource.v1alpha3.BasicDevice.CapacityEntry")
- proto.RegisterType((*CELDeviceSelector)(nil), "k8s.io.api.resource.v1alpha3.CELDeviceSelector")
- proto.RegisterType((*Counter)(nil), "k8s.io.api.resource.v1alpha3.Counter")
- proto.RegisterType((*CounterSet)(nil), "k8s.io.api.resource.v1alpha3.CounterSet")
- proto.RegisterMapType((map[string]Counter)(nil), "k8s.io.api.resource.v1alpha3.CounterSet.CountersEntry")
- proto.RegisterType((*Device)(nil), "k8s.io.api.resource.v1alpha3.Device")
- proto.RegisterType((*DeviceAllocationConfiguration)(nil), "k8s.io.api.resource.v1alpha3.DeviceAllocationConfiguration")
- proto.RegisterType((*DeviceAllocationResult)(nil), "k8s.io.api.resource.v1alpha3.DeviceAllocationResult")
- proto.RegisterType((*DeviceAttribute)(nil), "k8s.io.api.resource.v1alpha3.DeviceAttribute")
- proto.RegisterType((*DeviceClaim)(nil), "k8s.io.api.resource.v1alpha3.DeviceClaim")
- proto.RegisterType((*DeviceClaimConfiguration)(nil), "k8s.io.api.resource.v1alpha3.DeviceClaimConfiguration")
- proto.RegisterType((*DeviceClass)(nil), "k8s.io.api.resource.v1alpha3.DeviceClass")
- proto.RegisterType((*DeviceClassConfiguration)(nil), "k8s.io.api.resource.v1alpha3.DeviceClassConfiguration")
- proto.RegisterType((*DeviceClassList)(nil), "k8s.io.api.resource.v1alpha3.DeviceClassList")
- proto.RegisterType((*DeviceClassSpec)(nil), "k8s.io.api.resource.v1alpha3.DeviceClassSpec")
- proto.RegisterType((*DeviceConfiguration)(nil), "k8s.io.api.resource.v1alpha3.DeviceConfiguration")
- proto.RegisterType((*DeviceConstraint)(nil), "k8s.io.api.resource.v1alpha3.DeviceConstraint")
- proto.RegisterType((*DeviceCounterConsumption)(nil), "k8s.io.api.resource.v1alpha3.DeviceCounterConsumption")
- proto.RegisterMapType((map[string]Counter)(nil), "k8s.io.api.resource.v1alpha3.DeviceCounterConsumption.CountersEntry")
- proto.RegisterType((*DeviceRequest)(nil), "k8s.io.api.resource.v1alpha3.DeviceRequest")
- proto.RegisterType((*DeviceRequestAllocationResult)(nil), "k8s.io.api.resource.v1alpha3.DeviceRequestAllocationResult")
- proto.RegisterType((*DeviceSelector)(nil), "k8s.io.api.resource.v1alpha3.DeviceSelector")
- proto.RegisterType((*DeviceSubRequest)(nil), "k8s.io.api.resource.v1alpha3.DeviceSubRequest")
- proto.RegisterType((*DeviceTaint)(nil), "k8s.io.api.resource.v1alpha3.DeviceTaint")
- proto.RegisterType((*DeviceTaintRule)(nil), "k8s.io.api.resource.v1alpha3.DeviceTaintRule")
- proto.RegisterType((*DeviceTaintRuleList)(nil), "k8s.io.api.resource.v1alpha3.DeviceTaintRuleList")
- proto.RegisterType((*DeviceTaintRuleSpec)(nil), "k8s.io.api.resource.v1alpha3.DeviceTaintRuleSpec")
- proto.RegisterType((*DeviceTaintSelector)(nil), "k8s.io.api.resource.v1alpha3.DeviceTaintSelector")
- proto.RegisterType((*DeviceToleration)(nil), "k8s.io.api.resource.v1alpha3.DeviceToleration")
- proto.RegisterType((*NetworkDeviceData)(nil), "k8s.io.api.resource.v1alpha3.NetworkDeviceData")
- proto.RegisterType((*OpaqueDeviceConfiguration)(nil), "k8s.io.api.resource.v1alpha3.OpaqueDeviceConfiguration")
- proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaim")
- proto.RegisterType((*ResourceClaimConsumerReference)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimConsumerReference")
- proto.RegisterType((*ResourceClaimList)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimList")
- proto.RegisterType((*ResourceClaimSpec)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimSpec")
- proto.RegisterType((*ResourceClaimStatus)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimStatus")
- proto.RegisterType((*ResourceClaimTemplate)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimTemplate")
- proto.RegisterType((*ResourceClaimTemplateList)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimTemplateList")
- proto.RegisterType((*ResourceClaimTemplateSpec)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimTemplateSpec")
- proto.RegisterType((*ResourcePool)(nil), "k8s.io.api.resource.v1alpha3.ResourcePool")
- proto.RegisterType((*ResourceSlice)(nil), "k8s.io.api.resource.v1alpha3.ResourceSlice")
- proto.RegisterType((*ResourceSliceList)(nil), "k8s.io.api.resource.v1alpha3.ResourceSliceList")
- proto.RegisterType((*ResourceSliceSpec)(nil), "k8s.io.api.resource.v1alpha3.ResourceSliceSpec")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/resource/v1alpha3/generated.proto", fileDescriptor_66649ee9bbcd89d2)
-}
-
-var fileDescriptor_66649ee9bbcd89d2 = []byte{
- // 2635 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x1a, 0x5b, 0x6f, 0x1c, 0x57,
- 0x39, 0xb3, 0xbb, 0x5e, 0xaf, 0xbf, 0x8d, 0x1d, 0xfb, 0x84, 0x84, 0x8d, 0x49, 0x77, 0x93, 0x09,
- 0x17, 0xa7, 0x75, 0xd6, 0x8d, 0x53, 0xb5, 0x85, 0x80, 0x84, 0xd7, 0x76, 0x52, 0xa7, 0x89, 0xe3,
- 0x9c, 0x75, 0x03, 0x81, 0x12, 0x18, 0xcf, 0x1e, 0xdb, 0x83, 0x67, 0x67, 0xa6, 0x73, 0x66, 0x9d,
- 0x5a, 0x42, 0xa8, 0xe2, 0x07, 0x54, 0xbc, 0xf2, 0x80, 0x2a, 0xf1, 0x50, 0x89, 0x17, 0xe0, 0x99,
- 0x17, 0x90, 0x40, 0x6a, 0x04, 0x3c, 0x44, 0xa2, 0x42, 0x15, 0x12, 0x0b, 0x59, 0x84, 0xf8, 0x0b,
- 0xc8, 0x4f, 0xe8, 0x5c, 0xe6, 0xba, 0x3b, 0xce, 0xac, 0x49, 0xac, 0x20, 0xf5, 0x6d, 0xf7, 0x3b,
- 0xdf, 0xed, 0x7c, 0xf7, 0x73, 0xe6, 0xc0, 0xec, 0xce, 0xeb, 0xb4, 0x6e, 0xd8, 0x73, 0x9a, 0x63,
- 0xcc, 0xb9, 0x84, 0xda, 0x1d, 0x57, 0x27, 0x73, 0xbb, 0x97, 0x35, 0xd3, 0xd9, 0xd6, 0xae, 0xcc,
- 0x6d, 0x11, 0x8b, 0xb8, 0x9a, 0x47, 0x5a, 0x75, 0xc7, 0xb5, 0x3d, 0x1b, 0x9d, 0x15, 0xd8, 0x75,
- 0xcd, 0x31, 0xea, 0x3e, 0x76, 0xdd, 0xc7, 0x9e, 0xbe, 0xb4, 0x65, 0x78, 0xdb, 0x9d, 0x8d, 0xba,
- 0x6e, 0xb7, 0xe7, 0xb6, 0xec, 0x2d, 0x7b, 0x8e, 0x13, 0x6d, 0x74, 0x36, 0xf9, 0x3f, 0xfe, 0x87,
- 0xff, 0x12, 0xcc, 0xa6, 0xd5, 0x88, 0x68, 0xdd, 0x76, 0x99, 0xd8, 0xa4, 0xc0, 0xe9, 0x57, 0x42,
- 0x9c, 0xb6, 0xa6, 0x6f, 0x1b, 0x16, 0x71, 0xf7, 0xe6, 0x9c, 0x9d, 0xad, 0xb8, 0xbe, 0xc3, 0x50,
- 0xd1, 0xb9, 0x36, 0xf1, 0xb4, 0x41, 0xb2, 0xe6, 0xd2, 0xa8, 0xdc, 0x8e, 0xe5, 0x19, 0xed, 0x7e,
- 0x31, 0xaf, 0x3e, 0x89, 0x80, 0xea, 0xdb, 0xa4, 0xad, 0x25, 0xe9, 0xd4, 0x0f, 0xf2, 0x70, 0x6a,
- 0xc1, 0x34, 0x6d, 0x9d, 0xc1, 0x96, 0xc8, 0xae, 0xa1, 0x93, 0xa6, 0xa7, 0x79, 0x1d, 0x8a, 0xbe,
- 0x08, 0xc5, 0x96, 0x6b, 0xec, 0x12, 0xb7, 0xa2, 0x9c, 0x53, 0x66, 0xc6, 0x1a, 0x13, 0x0f, 0xbb,
- 0xb5, 0x63, 0xbd, 0x6e, 0xad, 0xb8, 0xc4, 0xa1, 0x58, 0xae, 0xa2, 0x73, 0x50, 0x70, 0x6c, 0xdb,
- 0xac, 0xe4, 0x38, 0xd6, 0x71, 0x89, 0x55, 0x58, 0xb3, 0x6d, 0x13, 0xf3, 0x15, 0xce, 0x89, 0x73,
- 0xae, 0xe4, 0x13, 0x9c, 0x38, 0x14, 0xcb, 0x55, 0xa4, 0x03, 0xe8, 0xb6, 0xd5, 0x32, 0x3c, 0xc3,
- 0xb6, 0x68, 0xa5, 0x70, 0x2e, 0x3f, 0x53, 0x9e, 0x9f, 0xab, 0x87, 0x6e, 0x0e, 0x36, 0x56, 0x77,
- 0x76, 0xb6, 0x18, 0x80, 0xd6, 0x99, 0xfd, 0xea, 0xbb, 0x97, 0xeb, 0x8b, 0x3e, 0x5d, 0x03, 0x49,
- 0xe6, 0x10, 0x80, 0x28, 0x8e, 0xb0, 0x45, 0x6f, 0x42, 0xa1, 0xa5, 0x79, 0x5a, 0x65, 0xe4, 0x9c,
- 0x32, 0x53, 0x9e, 0xbf, 0x94, 0xca, 0x5e, 0xda, 0xad, 0x8e, 0xb5, 0x07, 0xcb, 0xef, 0x7a, 0xc4,
- 0xa2, 0x8c, 0x79, 0x89, 0xed, 0x6c, 0x49, 0xf3, 0x34, 0xcc, 0x99, 0xa0, 0x0d, 0x28, 0x5b, 0xc4,
- 0x7b, 0x60, 0xbb, 0x3b, 0x0c, 0x58, 0x29, 0x72, 0x9e, 0x51, 0x95, 0xfb, 0x23, 0xb3, 0xbe, 0x2a,
- 0x09, 0xf8, 0x9e, 0x19, 0x59, 0xe3, 0x44, 0xaf, 0x5b, 0x2b, 0xaf, 0x86, 0x7c, 0x70, 0x94, 0xa9,
- 0xfa, 0x47, 0x05, 0x26, 0xa5, 0x87, 0x0c, 0xdb, 0xc2, 0x84, 0x76, 0x4c, 0x0f, 0x7d, 0x17, 0x46,
- 0x85, 0xd1, 0x28, 0xf7, 0x4e, 0x79, 0xfe, 0x95, 0x83, 0x85, 0x0a, 0x69, 0x49, 0x36, 0x8d, 0x13,
- 0xd2, 0x58, 0xa3, 0x62, 0x9d, 0x62, 0x9f, 0x2b, 0xba, 0x0b, 0xc7, 0x2d, 0xbb, 0x45, 0x9a, 0xc4,
- 0x24, 0xba, 0x67, 0xbb, 0xdc, 0x73, 0xe5, 0xf9, 0x73, 0x51, 0x29, 0x2c, 0x4f, 0x98, 0xed, 0x57,
- 0x23, 0x78, 0x8d, 0xc9, 0x5e, 0xb7, 0x76, 0x3c, 0x0a, 0xc1, 0x31, 0x3e, 0xea, 0xdf, 0x8a, 0x50,
- 0x6e, 0x68, 0xd4, 0xd0, 0x85, 0x44, 0xf4, 0x43, 0x00, 0xcd, 0xf3, 0x5c, 0x63, 0xa3, 0xe3, 0xf1,
- 0xbd, 0x30, 0x9f, 0x7f, 0xf9, 0xe0, 0xbd, 0x44, 0xc8, 0xeb, 0x0b, 0x01, 0xed, 0xb2, 0xe5, 0xb9,
- 0x7b, 0x8d, 0x0b, 0xbe, 0xf7, 0xc3, 0x85, 0x1f, 0xfd, 0xbd, 0x36, 0x7e, 0xa7, 0xa3, 0x99, 0xc6,
- 0xa6, 0x41, 0x5a, 0xab, 0x5a, 0x9b, 0xe0, 0x88, 0x44, 0xb4, 0x0b, 0x25, 0x5d, 0x73, 0x34, 0xdd,
- 0xf0, 0xf6, 0x2a, 0x39, 0x2e, 0xfd, 0xb5, 0xec, 0xd2, 0x17, 0x25, 0xa5, 0x90, 0x7d, 0x5e, 0xca,
- 0x2e, 0xf9, 0xe0, 0x7e, 0xc9, 0x81, 0x2c, 0xf4, 0x03, 0x98, 0xd4, 0x6d, 0x8b, 0x76, 0xda, 0x84,
- 0x2e, 0xda, 0x1d, 0xcb, 0x23, 0x2e, 0xad, 0xe4, 0xb9, 0xfc, 0x57, 0xb3, 0x78, 0x52, 0xd2, 0x2c,
- 0x72, 0x16, 0x0e, 0x0f, 0xfc, 0x8a, 0x14, 0x3f, 0xb9, 0x98, 0xe0, 0x8b, 0xfb, 0x24, 0xa1, 0x19,
- 0x28, 0x31, 0xaf, 0x30, 0x9d, 0x2a, 0x05, 0x91, 0xb7, 0x4c, 0xf1, 0x55, 0x09, 0xc3, 0xc1, 0x6a,
- 0x5f, 0x1c, 0x8c, 0x3c, 0x9d, 0x38, 0x60, 0x1a, 0x68, 0xa6, 0xc9, 0x10, 0x28, 0x4f, 0x9b, 0x92,
- 0xd0, 0x60, 0x41, 0xc2, 0x70, 0xb0, 0x8a, 0xee, 0x40, 0xd1, 0xd3, 0x0c, 0xcb, 0xa3, 0x95, 0x51,
- 0x6e, 0x9f, 0x8b, 0x59, 0xec, 0xb3, 0xce, 0x28, 0xc2, 0x42, 0xc3, 0xff, 0x52, 0x2c, 0x19, 0x4d,
- 0x9b, 0x70, 0x22, 0x11, 0x38, 0x68, 0x12, 0xf2, 0x3b, 0x64, 0x4f, 0x94, 0x3a, 0xcc, 0x7e, 0xa2,
- 0x45, 0x18, 0xd9, 0xd5, 0xcc, 0x0e, 0xe1, 0x85, 0x2d, 0x5e, 0x29, 0xd2, 0x13, 0xcc, 0xe7, 0x8a,
- 0x05, 0xed, 0x57, 0x72, 0xaf, 0x2b, 0xd3, 0x3b, 0x30, 0x1e, 0x0b, 0x94, 0x01, 0xb2, 0x96, 0xe2,
- 0xb2, 0xea, 0x07, 0x15, 0xbd, 0x50, 0xf8, 0x9d, 0x8e, 0x66, 0x79, 0x86, 0xb7, 0x17, 0x11, 0xa6,
- 0x5e, 0x87, 0xa9, 0xc5, 0xe5, 0x9b, 0xb2, 0x90, 0xfb, 0xc6, 0x9e, 0x07, 0x20, 0xef, 0x3a, 0x2e,
- 0xa1, 0xac, 0x88, 0xc9, 0x72, 0x1e, 0xd4, 0xc9, 0xe5, 0x60, 0x05, 0x47, 0xb0, 0xd4, 0xfb, 0x30,
- 0x2a, 0xc3, 0x05, 0x35, 0x7d, 0xed, 0x94, 0xc3, 0x68, 0xd7, 0x18, 0x97, 0x92, 0x46, 0xee, 0x32,
- 0x26, 0x52, 0x59, 0xf5, 0x3f, 0x0a, 0x80, 0x14, 0xd0, 0x24, 0x1e, 0xeb, 0x22, 0x16, 0x8b, 0x46,
- 0x25, 0xde, 0x45, 0x78, 0x34, 0xf2, 0x15, 0xd4, 0x82, 0x92, 0xee, 0x67, 0x4a, 0x2e, 0x4b, 0xa6,
- 0x84, 0xdc, 0xfd, 0x9f, 0xb2, 0x48, 0x4c, 0x06, 0x89, 0xea, 0x67, 0x48, 0xc0, 0x79, 0x7a, 0x03,
- 0xc6, 0x63, 0xc8, 0x03, 0x9c, 0x75, 0x35, 0xee, 0xac, 0x2f, 0x64, 0xd2, 0x22, 0xea, 0xa3, 0x5d,
- 0x90, 0x9d, 0x2f, 0xc3, 0xae, 0x6f, 0xc0, 0xc8, 0x06, 0xab, 0x38, 0x52, 0xd8, 0xc5, 0xcc, 0xc5,
- 0xa9, 0x31, 0xc6, 0x4c, 0xce, 0x01, 0x58, 0xb0, 0x50, 0xdf, 0xcf, 0xc1, 0x0b, 0xc9, 0x46, 0xb0,
- 0x68, 0x5b, 0x9b, 0xc6, 0x56, 0xc7, 0xe5, 0x7f, 0xd0, 0xd7, 0xa1, 0x28, 0x58, 0x4a, 0x8d, 0x66,
- 0xfc, 0x04, 0x6a, 0x72, 0xe8, 0x7e, 0xb7, 0x76, 0x3a, 0x49, 0x2a, 0x56, 0xb0, 0xa4, 0x63, 0x79,
- 0xed, 0x92, 0x77, 0x3a, 0x84, 0x7a, 0xc2, 0x4b, 0xb2, 0xb2, 0x60, 0x09, 0xc3, 0xc1, 0x2a, 0x7a,
- 0x4f, 0x81, 0x93, 0x2d, 0x59, 0xcc, 0x22, 0x3a, 0xc8, 0x4e, 0x73, 0x39, 0x5b, 0x15, 0x8c, 0x10,
- 0x36, 0x3e, 0x27, 0x95, 0x3d, 0x39, 0x60, 0x11, 0x0f, 0x12, 0xa5, 0xfe, 0x4b, 0x81, 0xd3, 0x83,
- 0x3b, 0x23, 0xda, 0x84, 0x51, 0x97, 0xff, 0xf2, 0x9b, 0xd2, 0xd5, 0x2c, 0x0a, 0xc9, 0x6d, 0xa6,
- 0xf7, 0x59, 0xf1, 0x9f, 0x62, 0x9f, 0x39, 0xd2, 0xa1, 0xa8, 0x73, 0x9d, 0x64, 0x4c, 0x5f, 0x1d,
- 0xae, 0x8f, 0xc7, 0x2d, 0x10, 0xd4, 0x3b, 0x01, 0xc6, 0x92, 0xb5, 0xfa, 0x73, 0x05, 0x4e, 0x24,
- 0x0a, 0x14, 0xaa, 0x42, 0xde, 0xb0, 0x3c, 0x1e, 0x56, 0x79, 0xe1, 0xa3, 0x15, 0xcb, 0x13, 0x19,
- 0xca, 0x16, 0xd0, 0x79, 0x28, 0x6c, 0xb0, 0xb1, 0x2e, 0xcf, 0x8b, 0xf3, 0x78, 0xaf, 0x5b, 0x1b,
- 0x6b, 0xd8, 0xb6, 0x29, 0x30, 0xf8, 0x12, 0xfa, 0x12, 0x14, 0xa9, 0xe7, 0x1a, 0xd6, 0x96, 0xec,
- 0x21, 0x7c, 0x8e, 0x69, 0x72, 0x88, 0x40, 0x93, 0xcb, 0xe8, 0x45, 0x18, 0xdd, 0x25, 0x2e, 0x2f,
- 0x3e, 0x23, 0x1c, 0x93, 0x77, 0x87, 0xbb, 0x02, 0x24, 0x50, 0x7d, 0x04, 0xf5, 0x97, 0x39, 0x28,
- 0x4b, 0x07, 0x9a, 0x9a, 0xd1, 0x46, 0xf7, 0x22, 0x01, 0x25, 0x3c, 0xf1, 0xd2, 0x10, 0x9e, 0x08,
- 0x73, 0x7d, 0x40, 0x04, 0x12, 0x28, 0xb3, 0xce, 0xe8, 0xb9, 0xa2, 0xbd, 0x08, 0x07, 0xd4, 0x33,
- 0x06, 0x9e, 0x24, 0x6b, 0x9c, 0x94, 0x02, 0xca, 0x21, 0x8c, 0xe2, 0x28, 0x5f, 0x74, 0x3f, 0x70,
- 0xf1, 0x30, 0x0d, 0x9e, 0x6d, 0x3e, 0x9b, 0x77, 0x3f, 0x52, 0xa0, 0x92, 0x46, 0x14, 0xcb, 0x47,
- 0xe5, 0x50, 0xf9, 0x98, 0x3b, 0xba, 0x7c, 0xfc, 0xad, 0x12, 0xf1, 0x3d, 0xa5, 0xe8, 0x7b, 0x50,
- 0x62, 0x03, 0x3e, 0x9f, 0xd7, 0x45, 0xef, 0x79, 0x39, 0xdb, 0x71, 0xe0, 0xf6, 0xc6, 0xf7, 0x89,
- 0xee, 0xdd, 0x22, 0x9e, 0x16, 0xf6, 0xb9, 0x10, 0x86, 0x03, 0xae, 0xe8, 0x36, 0x14, 0xa8, 0x43,
- 0xf4, 0x61, 0x7a, 0x3c, 0x57, 0xad, 0xe9, 0x10, 0x3d, 0xac, 0xd7, 0xec, 0x1f, 0xe6, 0x8c, 0xd4,
- 0x9f, 0x46, 0x9d, 0x41, 0x69, 0xdc, 0x19, 0x69, 0x26, 0x56, 0x8e, 0xce, 0xc4, 0xbf, 0x09, 0x4a,
- 0x01, 0xd7, 0xef, 0xa6, 0x41, 0x3d, 0xf4, 0x76, 0x9f, 0x99, 0xeb, 0xd9, 0xcc, 0xcc, 0xa8, 0xb9,
- 0x91, 0x83, 0x2c, 0xf3, 0x21, 0x11, 0x13, 0xaf, 0xc2, 0x88, 0xe1, 0x91, 0xb6, 0x9f, 0x5f, 0x17,
- 0x33, 0xdb, 0x38, 0x1c, 0x1c, 0x56, 0x18, 0x3d, 0x16, 0x6c, 0xd4, 0x47, 0xf1, 0x1d, 0x30, 0xdb,
- 0xa3, 0xef, 0xc0, 0x18, 0x95, 0xc3, 0x8e, 0x5f, 0x25, 0x66, 0xb3, 0xc8, 0x09, 0xc6, 0xd5, 0x29,
- 0x29, 0x6a, 0xcc, 0x87, 0x50, 0x1c, 0x72, 0x8c, 0x64, 0x70, 0x6e, 0xa8, 0x0c, 0x4e, 0xf8, 0x3f,
- 0x35, 0x83, 0x5d, 0x18, 0xe4, 0x40, 0xf4, 0x6d, 0x28, 0xda, 0x8e, 0xf6, 0x4e, 0x30, 0x78, 0x3d,
- 0xe1, 0x64, 0x72, 0x9b, 0xe3, 0x0e, 0x0a, 0x13, 0x60, 0x32, 0xc5, 0x32, 0x96, 0x2c, 0xd5, 0xf7,
- 0x15, 0x98, 0x4c, 0x16, 0xb3, 0x21, 0xaa, 0xc5, 0x1a, 0x4c, 0xb4, 0x35, 0x4f, 0xdf, 0x0e, 0x1a,
- 0x8a, 0x3c, 0xff, 0xcf, 0xf4, 0xba, 0xb5, 0x89, 0x5b, 0xb1, 0x95, 0xfd, 0x6e, 0x0d, 0x5d, 0xeb,
- 0x98, 0xe6, 0x5e, 0xfc, 0x2c, 0x94, 0xa0, 0x57, 0x3f, 0xcc, 0x05, 0x99, 0xd3, 0x77, 0xb8, 0x61,
- 0x13, 0xac, 0x1e, 0x8c, 0x73, 0xc9, 0x09, 0x36, 0x1c, 0xf4, 0x70, 0x04, 0x0b, 0xb9, 0x7d, 0x03,
- 0xe3, 0xd2, 0xe1, 0x8e, 0x56, 0xcf, 0xd9, 0xf8, 0xf8, 0xd7, 0x02, 0x8c, 0xc7, 0x9a, 0x5c, 0x86,
- 0x31, 0x72, 0x01, 0x4e, 0xb4, 0xc2, 0xa8, 0xe4, 0xe7, 0x3e, 0xe1, 0xaf, 0xcf, 0x4a, 0xe4, 0x68,
- 0x4a, 0x71, 0xba, 0x24, 0x7e, 0x3c, 0xc7, 0xf2, 0x4f, 0x3d, 0xc7, 0xee, 0xc2, 0x84, 0x16, 0x8c,
- 0x35, 0xb7, 0xec, 0x96, 0x7f, 0x30, 0xad, 0x4b, 0xaa, 0x89, 0x85, 0xd8, 0xea, 0x7e, 0xb7, 0xf6,
- 0x99, 0xe4, 0x30, 0xc4, 0xe0, 0x38, 0xc1, 0x05, 0x5d, 0x80, 0x11, 0xee, 0x1d, 0x3e, 0x79, 0xe4,
- 0xc3, 0x9a, 0xc2, 0x0d, 0x8b, 0xc5, 0x1a, 0xba, 0x0c, 0x65, 0xad, 0xd5, 0x36, 0xac, 0x05, 0x5d,
- 0x27, 0xd4, 0x3f, 0x90, 0xf2, 0x71, 0x66, 0x21, 0x04, 0xe3, 0x28, 0x0e, 0xb2, 0x60, 0x62, 0xd3,
- 0x70, 0xa9, 0xb7, 0xb0, 0xab, 0x19, 0xa6, 0xb6, 0x61, 0x12, 0x79, 0x3c, 0xcd, 0x34, 0x3f, 0x34,
- 0x3b, 0x1b, 0xfe, 0x80, 0x72, 0xda, 0xdf, 0xdf, 0xb5, 0x18, 0x37, 0x9c, 0xe0, 0xce, 0x86, 0x15,
- 0xcf, 0x36, 0x89, 0xc8, 0x68, 0x5a, 0x29, 0x65, 0x17, 0xb6, 0x1e, 0x90, 0x85, 0xc3, 0x4a, 0x08,
- 0xa3, 0x38, 0xca, 0x57, 0xfd, 0x4b, 0x70, 0x46, 0x48, 0x99, 0x65, 0xd1, 0x45, 0x36, 0x19, 0xf3,
- 0x25, 0x19, 0x6f, 0x91, 0xe1, 0x96, 0x83, 0xb1, 0xbf, 0x1e, 0xb9, 0x42, 0xcc, 0x65, 0xba, 0x42,
- 0xcc, 0x67, 0xb8, 0x42, 0x2c, 0x1c, 0x78, 0x85, 0x98, 0x70, 0xe4, 0x48, 0x06, 0x47, 0x26, 0x0c,
- 0x5b, 0x7c, 0x46, 0x86, 0x7d, 0x1b, 0x26, 0x12, 0xa7, 0xf2, 0x1b, 0x90, 0xd7, 0x89, 0x29, 0x6b,
- 0xfb, 0x13, 0x2e, 0x0d, 0xfb, 0xce, 0xf4, 0x8d, 0xd1, 0x5e, 0xb7, 0x96, 0x5f, 0x5c, 0xbe, 0x89,
- 0x19, 0x13, 0xf5, 0xd7, 0x79, 0xbf, 0x9a, 0x87, 0xa1, 0xf5, 0x69, 0x59, 0xf8, 0x5f, 0xcb, 0x42,
- 0x22, 0x34, 0x46, 0x9f, 0x51, 0x68, 0xfc, 0x3b, 0x18, 0x7b, 0xf9, 0x3d, 0x15, 0x7a, 0x21, 0xd2,
- 0x33, 0x1a, 0x65, 0x49, 0x9e, 0x7f, 0x93, 0xec, 0x89, 0x06, 0x72, 0x21, 0xda, 0x40, 0xc6, 0x06,
- 0x5f, 0xaf, 0xa0, 0xab, 0x50, 0x24, 0x9b, 0x9b, 0x44, 0xf7, 0x64, 0x52, 0xf9, 0x17, 0xa3, 0xc5,
- 0x65, 0x0e, 0xdd, 0xef, 0xd6, 0xa6, 0x22, 0x22, 0x05, 0x10, 0x4b, 0x12, 0xf4, 0x0d, 0x18, 0xf3,
- 0x8c, 0x36, 0x59, 0x68, 0xb5, 0x48, 0x8b, 0xdb, 0xbb, 0x3c, 0xff, 0x62, 0xb6, 0x89, 0x70, 0xdd,
- 0x68, 0x13, 0x71, 0x58, 0x5c, 0xf7, 0x19, 0xe0, 0x90, 0x97, 0xfa, 0x30, 0x98, 0xdd, 0xb8, 0x58,
- 0xdc, 0x31, 0xc9, 0x11, 0x0c, 0xf9, 0xcd, 0xd8, 0x90, 0x7f, 0x39, 0xf3, 0xfd, 0x21, 0x53, 0x2f,
- 0x75, 0xd0, 0xff, 0x48, 0xf1, 0x87, 0xb6, 0x00, 0xf7, 0x08, 0x86, 0x69, 0x1c, 0x1f, 0xa6, 0x2f,
- 0x0d, 0xb5, 0x97, 0x94, 0x81, 0xfa, 0xe3, 0xfe, 0x9d, 0xf0, 0xa1, 0xba, 0x0d, 0x13, 0xad, 0x58,
- 0xaa, 0x0e, 0x73, 0x4e, 0xe1, 0xac, 0x82, 0x1c, 0x47, 0x2c, 0x53, 0xe3, 0x79, 0x8f, 0x13, 0xcc,
- 0xd9, 0x39, 0x81, 0x5f, 0xcf, 0x66, 0xbb, 0xe9, 0x8a, 0x5e, 0xf3, 0x06, 0xdb, 0x12, 0xfa, 0x0b,
- 0x36, 0xea, 0x4f, 0x72, 0xb1, 0x6d, 0x05, 0x72, 0xbe, 0xd6, 0x5f, 0xf3, 0x44, 0xa6, 0x9d, 0xcc,
- 0x54, 0xef, 0xd4, 0x44, 0x4f, 0x83, 0x01, 0xfd, 0xec, 0x6c, 0xac, 0x9f, 0x95, 0x12, 0xbd, 0x4c,
- 0x4d, 0xf4, 0x32, 0x18, 0xd0, 0xc7, 0x62, 0x55, 0x75, 0xe4, 0x69, 0x57, 0x55, 0xf5, 0x67, 0x39,
- 0xbf, 0x5d, 0x84, 0x45, 0xe9, 0x49, 0x65, 0xe7, 0x0d, 0x28, 0xd9, 0x0e, 0xc3, 0xb5, 0xfd, 0xad,
- 0xcf, 0xfa, 0x81, 0x7a, 0x5b, 0xc2, 0xf7, 0xbb, 0xb5, 0x4a, 0x92, 0xad, 0xbf, 0x86, 0x03, 0xea,
- 0xb0, 0x80, 0xe5, 0x33, 0x15, 0xb0, 0xc2, 0xf0, 0x05, 0x6c, 0x11, 0xa6, 0xc2, 0x02, 0xdb, 0x24,
- 0xba, 0x6d, 0xb5, 0xa8, 0xac, 0xf4, 0xa7, 0x7a, 0xdd, 0xda, 0xd4, 0x7a, 0x72, 0x11, 0xf7, 0xe3,
- 0xab, 0xbf, 0x50, 0x60, 0xaa, 0xef, 0x63, 0x1d, 0xba, 0x0a, 0xe3, 0x06, 0x9b, 0xc8, 0x37, 0x35,
- 0x9d, 0x44, 0x82, 0xe7, 0x94, 0x54, 0x6f, 0x7c, 0x25, 0xba, 0x88, 0xe3, 0xb8, 0xe8, 0x0c, 0xe4,
- 0x0d, 0xc7, 0xbf, 0x18, 0xe5, 0x1d, 0x7c, 0x65, 0x8d, 0x62, 0x06, 0x63, 0xad, 0x78, 0x5b, 0x73,
- 0x5b, 0x0f, 0x34, 0x97, 0xd5, 0x4a, 0x97, 0x4d, 0x2f, 0xf9, 0x78, 0x2b, 0x7e, 0x23, 0xbe, 0x8c,
- 0x93, 0xf8, 0xea, 0x87, 0x0a, 0x9c, 0x49, 0x3d, 0x04, 0x66, 0xfe, 0x9e, 0xab, 0x01, 0x38, 0x9a,
- 0xab, 0xb5, 0x89, 0x3c, 0x38, 0x1d, 0xe2, 0x33, 0x69, 0x50, 0x8e, 0xd7, 0x02, 0x46, 0x38, 0xc2,
- 0x54, 0xfd, 0x20, 0x07, 0xe3, 0x58, 0x46, 0xb0, 0xb8, 0xe5, 0x7b, 0xf6, 0x4d, 0xe0, 0x4e, 0xac,
- 0x09, 0x3c, 0x61, 0xdc, 0x8a, 0x29, 0x97, 0xd6, 0x02, 0xd0, 0x3d, 0x28, 0x52, 0xfe, 0xad, 0x3c,
- 0xdb, 0x9d, 0x75, 0x9c, 0x29, 0x27, 0x0c, 0x9d, 0x20, 0xfe, 0x63, 0xc9, 0x50, 0xed, 0x29, 0x50,
- 0x8d, 0xe1, 0xcb, 0x8f, 0x7a, 0x2e, 0x26, 0x9b, 0xc4, 0x25, 0x96, 0x4e, 0xd0, 0x2c, 0x94, 0x34,
- 0xc7, 0xb8, 0xee, 0xda, 0x1d, 0x47, 0x7a, 0x34, 0x68, 0x1c, 0x0b, 0x6b, 0x2b, 0x1c, 0x8e, 0x03,
- 0x0c, 0x86, 0xed, 0x6b, 0x24, 0xe3, 0x2a, 0x72, 0x33, 0x2a, 0xe0, 0x38, 0xc0, 0x08, 0x26, 0xc7,
- 0x42, 0xea, 0xe4, 0xd8, 0x80, 0x7c, 0xc7, 0x68, 0xc9, 0xeb, 0xdc, 0x97, 0xfd, 0x62, 0xf1, 0xd6,
- 0xca, 0xd2, 0x7e, 0xb7, 0x76, 0x3e, 0xed, 0x2d, 0x82, 0xb7, 0xe7, 0x10, 0x5a, 0x7f, 0x6b, 0x65,
- 0x09, 0x33, 0x62, 0xf5, 0x77, 0x0a, 0x4c, 0xc5, 0x36, 0x79, 0x04, 0x0d, 0x74, 0x2d, 0xde, 0x40,
- 0x5f, 0x1a, 0xc2, 0x65, 0x29, 0xed, 0xd3, 0x48, 0x6c, 0x82, 0xf7, 0xce, 0xf5, 0xe4, 0xf7, 0xf9,
- 0x8b, 0x99, 0x2f, 0x7d, 0xd3, 0x3f, 0xca, 0xab, 0x7f, 0xc8, 0xc1, 0xc9, 0x01, 0x51, 0x84, 0xee,
- 0x03, 0x84, 0xe3, 0xed, 0x00, 0xa3, 0x0d, 0x10, 0xd8, 0xf7, 0x89, 0x62, 0x82, 0x7f, 0x35, 0x0f,
- 0xa1, 0x11, 0x8e, 0x88, 0x42, 0xd9, 0x25, 0x94, 0xb8, 0xbb, 0xa4, 0x75, 0x8d, 0x57, 0x7f, 0x66,
- 0xba, 0xaf, 0x0e, 0x61, 0xba, 0xbe, 0xe8, 0x0d, 0xa7, 0x62, 0x1c, 0x32, 0xc6, 0x51, 0x29, 0xe8,
- 0x7e, 0x68, 0x42, 0xf1, 0x14, 0xe4, 0x4a, 0xa6, 0x1d, 0xc5, 0x5f, 0xb1, 0x1c, 0x60, 0xcc, 0x8f,
- 0x15, 0x38, 0x15, 0x53, 0x72, 0x9d, 0xb4, 0x1d, 0x53, 0xf3, 0x8e, 0x62, 0x22, 0xbd, 0x17, 0x2b,
- 0x46, 0xaf, 0x0d, 0x61, 0x49, 0x5f, 0xc9, 0xd4, 0xb9, 0xf4, 0xcf, 0x0a, 0x9c, 0x19, 0x48, 0x71,
- 0x04, 0xc9, 0xf5, 0xcd, 0x78, 0x72, 0x5d, 0x39, 0xc4, 0xbe, 0xd2, 0x2f, 0x7d, 0xcf, 0xa4, 0xda,
- 0xe1, 0xff, 0xb2, 0x7b, 0xa8, 0xbf, 0x52, 0xe0, 0xb8, 0x8f, 0xc9, 0xa6, 0xc3, 0x0c, 0xc7, 0xf5,
- 0x79, 0x00, 0xf9, 0x7e, 0xcb, 0xff, 0x30, 0x93, 0x0f, 0xf5, 0xbe, 0x1e, 0xac, 0xe0, 0x08, 0x16,
- 0xba, 0x01, 0xc8, 0xd7, 0xb0, 0x69, 0xfa, 0xd7, 0x9b, 0xbc, 0x05, 0xe4, 0x1b, 0xd3, 0x92, 0x16,
- 0xe1, 0x3e, 0x0c, 0x3c, 0x80, 0x4a, 0xfd, 0xbd, 0x12, 0xf6, 0x6d, 0x0e, 0x7e, 0x5e, 0x2d, 0xcf,
- 0x95, 0x4b, 0xb5, 0x7c, 0xb4, 0xef, 0x70, 0xcc, 0xe7, 0xb6, 0xef, 0x70, 0xed, 0x52, 0x52, 0xe2,
- 0x4f, 0x85, 0xc4, 0x2e, 0x78, 0x2a, 0x64, 0x9d, 0xf2, 0x6e, 0x46, 0x5e, 0xed, 0xc5, 0x4f, 0xf7,
- 0x07, 0xa8, 0xc3, 0xc2, 0x74, 0xe0, 0xf5, 0xdc, 0x6c, 0xe4, 0x3d, 0x51, 0x62, 0xba, 0xc8, 0xf0,
- 0xa6, 0xa8, 0xf0, 0x94, 0xde, 0x14, 0xcd, 0x46, 0xde, 0x14, 0x89, 0x9b, 0xbf, 0x70, 0x22, 0xea,
- 0x7f, 0x57, 0x74, 0x3b, 0xec, 0x2f, 0xe2, 0xce, 0xef, 0xf3, 0x59, 0x5a, 0xf4, 0x01, 0x4f, 0xe6,
- 0x30, 0x9c, 0x76, 0x88, 0x2b, 0xc0, 0xa1, 0x96, 0x2c, 0x53, 0x47, 0xb9, 0x32, 0xd3, 0xbd, 0x6e,
- 0xed, 0xf4, 0xda, 0x40, 0x0c, 0x9c, 0x42, 0x89, 0xb6, 0x61, 0x82, 0x6e, 0x6b, 0x2e, 0x69, 0x05,
- 0x8f, 0xc4, 0xc4, 0xc5, 0xef, 0x4c, 0xd6, 0xa7, 0x2f, 0xe1, 0xfd, 0x72, 0x33, 0xc6, 0x07, 0x27,
- 0xf8, 0x36, 0x1a, 0x0f, 0x1f, 0x57, 0x8f, 0x3d, 0x7a, 0x5c, 0x3d, 0xf6, 0xc9, 0xe3, 0xea, 0xb1,
- 0xf7, 0x7a, 0x55, 0xe5, 0x61, 0xaf, 0xaa, 0x3c, 0xea, 0x55, 0x95, 0x4f, 0x7a, 0x55, 0xe5, 0x1f,
- 0xbd, 0xaa, 0xf2, 0xe3, 0x7f, 0x56, 0x8f, 0x7d, 0xeb, 0xec, 0x41, 0x4f, 0x74, 0xff, 0x1b, 0x00,
- 0x00, 0xff, 0xff, 0xa5, 0x57, 0x37, 0xad, 0xc1, 0x2b, 0x00, 0x00,
-}
-
-func (m *AllocatedDeviceStatus) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AllocatedDeviceStatus) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AllocatedDeviceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.NetworkData != nil {
- {
- size, err := m.NetworkData.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x32
- }
- if m.Data != nil {
- {
- size, err := m.Data.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x2a
- }
- if len(m.Conditions) > 0 {
- for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x22
- }
- }
- i -= len(m.Device)
- copy(dAtA[i:], m.Device)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Device)))
- i--
- dAtA[i] = 0x1a
- i -= len(m.Pool)
- copy(dAtA[i:], m.Pool)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pool)))
- i--
- dAtA[i] = 0x12
- i -= len(m.Driver)
- copy(dAtA[i:], m.Driver)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver)))
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *AllocationResult) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AllocationResult) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.NodeSelector != nil {
- {
- size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- {
- size, err := m.Devices.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *BasicDevice) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *BasicDevice) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *BasicDevice) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Taints) > 0 {
- for iNdEx := len(m.Taints) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Taints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3a
- }
- }
- if m.AllNodes != nil {
- i--
- if *m.AllNodes {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x30
- }
- if m.NodeSelector != nil {
- {
- size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x2a
- }
- if m.NodeName != nil {
- i -= len(*m.NodeName)
- copy(dAtA[i:], *m.NodeName)
- i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName)))
- i--
- dAtA[i] = 0x22
- }
- if len(m.ConsumesCounters) > 0 {
- for iNdEx := len(m.ConsumesCounters) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.ConsumesCounters[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- }
- if len(m.Capacity) > 0 {
- keysForCapacity := make([]string, 0, len(m.Capacity))
- for k := range m.Capacity {
- keysForCapacity = append(keysForCapacity, string(k))
- }
- github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity)
- for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- {
- v := m.Capacity[QualifiedName(keysForCapacity[iNdEx])]
- baseI := i
- {
- size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- i -= len(keysForCapacity[iNdEx])
- copy(dAtA[i:], keysForCapacity[iNdEx])
- i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx])))
- i--
- dAtA[i] = 0xa
- i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
- i--
- dAtA[i] = 0x12
- }
- }
- if len(m.Attributes) > 0 {
- keysForAttributes := make([]string, 0, len(m.Attributes))
- for k := range m.Attributes {
- keysForAttributes = append(keysForAttributes, string(k))
- }
- github_com_gogo_protobuf_sortkeys.Strings(keysForAttributes)
- for iNdEx := len(keysForAttributes) - 1; iNdEx >= 0; iNdEx-- {
- v := m.Attributes[QualifiedName(keysForAttributes[iNdEx])]
- baseI := i
- {
- size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- i -= len(keysForAttributes[iNdEx])
- copy(dAtA[i:], keysForAttributes[iNdEx])
- i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAttributes[iNdEx])))
- i--
- dAtA[i] = 0xa
- i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *CELDeviceSelector) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *CELDeviceSelector) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *CELDeviceSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- i -= len(m.Expression)
- copy(dAtA[i:], m.Expression)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *Counter) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Counter) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Counter) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.Value.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *CounterSet) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *CounterSet) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *CounterSet) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Counters) > 0 {
- keysForCounters := make([]string, 0, len(m.Counters))
- for k := range m.Counters {
- keysForCounters = append(keysForCounters, string(k))
- }
- github_com_gogo_protobuf_sortkeys.Strings(keysForCounters)
- for iNdEx := len(keysForCounters) - 1; iNdEx >= 0; iNdEx-- {
- v := m.Counters[string(keysForCounters[iNdEx])]
- baseI := i
- {
- size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- i -= len(keysForCounters[iNdEx])
- copy(dAtA[i:], keysForCounters[iNdEx])
- i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCounters[iNdEx])))
- i--
- dAtA[i] = 0xa
- i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
- i--
- dAtA[i] = 0x12
- }
- }
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *Device) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Device) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Device) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Basic != nil {
- {
- size, err := m.Basic.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *DeviceAllocationConfiguration) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DeviceAllocationConfiguration) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DeviceAllocationConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- if len(m.Requests) > 0 {
- for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.Requests[iNdEx])
- copy(dAtA[i:], m.Requests[iNdEx])
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx])))
- i--
- dAtA[i] = 0x12
- }
- }
- i -= len(m.Source)
- copy(dAtA[i:], m.Source)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Source)))
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *DeviceAllocationResult) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DeviceAllocationResult) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DeviceAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Config) > 0 {
- for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- if len(m.Results) > 0 {
- for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Results[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *DeviceAttribute) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DeviceAttribute) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DeviceAttribute) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.VersionValue != nil {
- i -= len(*m.VersionValue)
- copy(dAtA[i:], *m.VersionValue)
- i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VersionValue)))
- i--
- dAtA[i] = 0x2a
- }
- if m.StringValue != nil {
- i -= len(*m.StringValue)
- copy(dAtA[i:], *m.StringValue)
- i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StringValue)))
- i--
- dAtA[i] = 0x22
- }
- if m.BoolValue != nil {
- i--
- if *m.BoolValue {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x18
- }
- if m.IntValue != nil {
- i = encodeVarintGenerated(dAtA, i, uint64(*m.IntValue))
- i--
- dAtA[i] = 0x10
- }
- return len(dAtA) - i, nil
-}
-
-func (m *DeviceClaim) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DeviceClaim) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DeviceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Config) > 0 {
- for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- }
- if len(m.Constraints) > 0 {
- for iNdEx := len(m.Constraints) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Constraints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- if len(m.Requests) > 0 {
- for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Requests[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *DeviceClaimConfiguration) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DeviceClaimConfiguration) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DeviceClaimConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- if len(m.Requests) > 0 {
- for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.Requests[iNdEx])
- copy(dAtA[i:], m.Requests[iNdEx])
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx])))
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *DeviceClass) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DeviceClass) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DeviceClass) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- {
- size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *DeviceClassConfiguration) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DeviceClassConfiguration) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DeviceClassConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *DeviceClassList) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DeviceClassList) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DeviceClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Items) > 0 {
- for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- {
- size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *DeviceClassSpec) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DeviceClassSpec) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DeviceClassSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Config) > 0 {
- for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- if len(m.Selectors) > 0 {
- for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *DeviceConfiguration) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DeviceConfiguration) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DeviceConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Opaque != nil {
- {
- size, err := m.Opaque.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *DeviceConstraint) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DeviceConstraint) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DeviceConstraint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.MatchAttribute != nil {
- i -= len(*m.MatchAttribute)
- copy(dAtA[i:], *m.MatchAttribute)
- i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchAttribute)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Requests) > 0 {
- for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.Requests[iNdEx])
- copy(dAtA[i:], m.Requests[iNdEx])
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx])))
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *DeviceCounterConsumption) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DeviceCounterConsumption) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DeviceCounterConsumption) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Counters) > 0 {
- keysForCounters := make([]string, 0, len(m.Counters))
- for k := range m.Counters {
- keysForCounters = append(keysForCounters, string(k))
- }
- github_com_gogo_protobuf_sortkeys.Strings(keysForCounters)
- for iNdEx := len(keysForCounters) - 1; iNdEx >= 0; iNdEx-- {
- v := m.Counters[string(keysForCounters[iNdEx])]
- baseI := i
- {
- size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- i -= len(keysForCounters[iNdEx])
- copy(dAtA[i:], keysForCounters[iNdEx])
- i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCounters[iNdEx])))
- i--
- dAtA[i] = 0xa
- i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
- i--
- dAtA[i] = 0x12
- }
- }
- i -= len(m.CounterSet)
- copy(dAtA[i:], m.CounterSet)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.CounterSet)))
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *DeviceRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DeviceRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DeviceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Tolerations) > 0 {
- for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x42
- }
- }
- if len(m.FirstAvailable) > 0 {
- for iNdEx := len(m.FirstAvailable) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.FirstAvailable[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3a
- }
- }
- if m.AdminAccess != nil {
- i--
- if *m.AdminAccess {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x30
- }
- i = encodeVarintGenerated(dAtA, i, uint64(m.Count))
- i--
- dAtA[i] = 0x28
- i -= len(m.AllocationMode)
- copy(dAtA[i:], m.AllocationMode)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllocationMode)))
- i--
- dAtA[i] = 0x22
- if len(m.Selectors) > 0 {
- for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- }
- i -= len(m.DeviceClassName)
- copy(dAtA[i:], m.DeviceClassName)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeviceClassName)))
- i--
- dAtA[i] = 0x12
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *DeviceRequestAllocationResult) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DeviceRequestAllocationResult) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DeviceRequestAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Tolerations) > 0 {
- for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x32
- }
- }
- if m.AdminAccess != nil {
- i--
- if *m.AdminAccess {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x28
- }
- i -= len(m.Device)
- copy(dAtA[i:], m.Device)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Device)))
- i--
- dAtA[i] = 0x22
- i -= len(m.Pool)
- copy(dAtA[i:], m.Pool)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pool)))
- i--
- dAtA[i] = 0x1a
- i -= len(m.Driver)
- copy(dAtA[i:], m.Driver)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver)))
- i--
- dAtA[i] = 0x12
- i -= len(m.Request)
- copy(dAtA[i:], m.Request)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Request)))
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *DeviceSelector) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DeviceSelector) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DeviceSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.CEL != nil {
- {
- size, err := m.CEL.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *DeviceSubRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DeviceSubRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DeviceSubRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Tolerations) > 0 {
- for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3a
- }
- }
- i = encodeVarintGenerated(dAtA, i, uint64(m.Count))
- i--
- dAtA[i] = 0x28
- i -= len(m.AllocationMode)
- copy(dAtA[i:], m.AllocationMode)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllocationMode)))
- i--
- dAtA[i] = 0x22
- if len(m.Selectors) > 0 {
- for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- }
- i -= len(m.DeviceClassName)
- copy(dAtA[i:], m.DeviceClassName)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeviceClassName)))
- i--
- dAtA[i] = 0x12
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *DeviceTaint) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DeviceTaint) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DeviceTaint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.TimeAdded != nil {
- {
- size, err := m.TimeAdded.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x22
- }
- i -= len(m.Effect)
- copy(dAtA[i:], m.Effect)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect)))
- i--
- dAtA[i] = 0x1a
- i -= len(m.Value)
- copy(dAtA[i:], m.Value)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value)))
- i--
- dAtA[i] = 0x12
- i -= len(m.Key)
- copy(dAtA[i:], m.Key)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *DeviceTaintRule) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DeviceTaintRule) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DeviceTaintRule) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- {
- size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *DeviceTaintRuleList) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DeviceTaintRuleList) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DeviceTaintRuleList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Items) > 0 {
- for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- {
- size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *DeviceTaintRuleSpec) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DeviceTaintRuleSpec) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DeviceTaintRuleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.Taint.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- if m.DeviceSelector != nil {
- {
- size, err := m.DeviceSelector.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *DeviceTaintSelector) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DeviceTaintSelector) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DeviceTaintSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Selectors) > 0 {
- for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x2a
- }
- }
- if m.Device != nil {
- i -= len(*m.Device)
- copy(dAtA[i:], *m.Device)
- i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Device)))
- i--
- dAtA[i] = 0x22
- }
- if m.Pool != nil {
- i -= len(*m.Pool)
- copy(dAtA[i:], *m.Pool)
- i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Pool)))
- i--
- dAtA[i] = 0x1a
- }
- if m.Driver != nil {
- i -= len(*m.Driver)
- copy(dAtA[i:], *m.Driver)
- i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Driver)))
- i--
- dAtA[i] = 0x12
- }
- if m.DeviceClassName != nil {
- i -= len(*m.DeviceClassName)
- copy(dAtA[i:], *m.DeviceClassName)
- i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DeviceClassName)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *DeviceToleration) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DeviceToleration) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DeviceToleration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.TolerationSeconds != nil {
- i = encodeVarintGenerated(dAtA, i, uint64(*m.TolerationSeconds))
- i--
- dAtA[i] = 0x28
- }
- i -= len(m.Effect)
- copy(dAtA[i:], m.Effect)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect)))
- i--
- dAtA[i] = 0x22
- i -= len(m.Value)
- copy(dAtA[i:], m.Value)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value)))
- i--
- dAtA[i] = 0x1a
- i -= len(m.Operator)
- copy(dAtA[i:], m.Operator)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator)))
- i--
- dAtA[i] = 0x12
- i -= len(m.Key)
- copy(dAtA[i:], m.Key)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *NetworkDeviceData) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *NetworkDeviceData) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *NetworkDeviceData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- i -= len(m.HardwareAddress)
- copy(dAtA[i:], m.HardwareAddress)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.HardwareAddress)))
- i--
- dAtA[i] = 0x1a
- if len(m.IPs) > 0 {
- for iNdEx := len(m.IPs) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.IPs[iNdEx])
- copy(dAtA[i:], m.IPs[iNdEx])
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPs[iNdEx])))
- i--
- dAtA[i] = 0x12
- }
- }
- i -= len(m.InterfaceName)
- copy(dAtA[i:], m.InterfaceName)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.InterfaceName)))
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *OpaqueDeviceConfiguration) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *OpaqueDeviceConfiguration) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *OpaqueDeviceConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.Parameters.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- i -= len(m.Driver)
- copy(dAtA[i:], m.Driver)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver)))
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *ResourceClaim) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ResourceClaim) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- {
- size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- {
- size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *ResourceClaimConsumerReference) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ResourceClaimConsumerReference) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResourceClaimConsumerReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- i -= len(m.UID)
- copy(dAtA[i:], m.UID)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID)))
- i--
- dAtA[i] = 0x2a
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0x22
- i -= len(m.Resource)
- copy(dAtA[i:], m.Resource)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource)))
- i--
- dAtA[i] = 0x1a
- i -= len(m.APIGroup)
- copy(dAtA[i:], m.APIGroup)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup)))
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *ResourceClaimList) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ResourceClaimList) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResourceClaimList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Items) > 0 {
- for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- {
- size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *ResourceClaimSpec) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ResourceClaimSpec) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResourceClaimSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.Devices.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *ResourceClaimStatus) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ResourceClaimStatus) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResourceClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Devices) > 0 {
- for iNdEx := len(m.Devices) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Devices[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x22
- }
- }
- if len(m.ReservedFor) > 0 {
- for iNdEx := len(m.ReservedFor) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.ReservedFor[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- if m.Allocation != nil {
- {
- size, err := m.Allocation.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ResourceClaimTemplate) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ResourceClaimTemplate) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResourceClaimTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- {
- size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *ResourceClaimTemplateList) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ResourceClaimTemplateList) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResourceClaimTemplateList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Items) > 0 {
- for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- {
- size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *ResourceClaimTemplateSpec) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ResourceClaimTemplateSpec) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResourceClaimTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- {
- size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *ResourcePool) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ResourcePool) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResourcePool) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceSliceCount))
- i--
- dAtA[i] = 0x18
- i = encodeVarintGenerated(dAtA, i, uint64(m.Generation))
- i--
- dAtA[i] = 0x10
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *ResourceSlice) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ResourceSlice) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResourceSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- {
- size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *ResourceSliceList) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ResourceSliceList) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResourceSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Items) > 0 {
- for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- {
- size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *ResourceSliceSpec) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ResourceSliceSpec) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResourceSliceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.SharedCounters) > 0 {
- for iNdEx := len(m.SharedCounters) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.SharedCounters[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x42
- }
- }
- if m.PerDeviceNodeSelection != nil {
- i--
- if *m.PerDeviceNodeSelection {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x38
- }
- if len(m.Devices) > 0 {
- for iNdEx := len(m.Devices) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Devices[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x32
- }
- }
- i--
- if m.AllNodes {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x28
- if m.NodeSelector != nil {
- {
- size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x22
- }
- i -= len(m.NodeName)
- copy(dAtA[i:], m.NodeName)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName)))
- i--
- dAtA[i] = 0x1a
- {
- size, err := m.Pool.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- i -= len(m.Driver)
- copy(dAtA[i:], m.Driver)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver)))
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
- offset -= sovGenerated(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *AllocatedDeviceStatus) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Driver)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Pool)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Device)
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.Conditions) > 0 {
- for _, e := range m.Conditions {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- if m.Data != nil {
- l = m.Data.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- if m.NetworkData != nil {
- l = m.NetworkData.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- return n
-}
-
-func (m *AllocationResult) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.Devices.Size()
- n += 1 + l + sovGenerated(uint64(l))
- if m.NodeSelector != nil {
- l = m.NodeSelector.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- return n
-}
-
-func (m *BasicDevice) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Attributes) > 0 {
- for k, v := range m.Attributes {
- _ = k
- _ = v
- l = v.Size()
- mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
- n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
- }
- }
- if len(m.Capacity) > 0 {
- for k, v := range m.Capacity {
- _ = k
- _ = v
- l = v.Size()
- mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
- n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
- }
- }
- if len(m.ConsumesCounters) > 0 {
- for _, e := range m.ConsumesCounters {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- if m.NodeName != nil {
- l = len(*m.NodeName)
- n += 1 + l + sovGenerated(uint64(l))
- }
- if m.NodeSelector != nil {
- l = m.NodeSelector.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- if m.AllNodes != nil {
- n += 2
- }
- if len(m.Taints) > 0 {
- for _, e := range m.Taints {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- return n
-}
-
-func (m *CELDeviceSelector) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Expression)
- n += 1 + l + sovGenerated(uint64(l))
- return n
-}
-
-func (m *Counter) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.Value.Size()
- n += 1 + l + sovGenerated(uint64(l))
- return n
-}
-
-func (m *CounterSet) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.Counters) > 0 {
- for k, v := range m.Counters {
- _ = k
- _ = v
- l = v.Size()
- mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
- n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
- }
- }
- return n
-}
-
-func (m *Device) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- n += 1 + l + sovGenerated(uint64(l))
- if m.Basic != nil {
- l = m.Basic.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- return n
-}
-
-func (m *DeviceAllocationConfiguration) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Source)
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.Requests) > 0 {
- for _, s := range m.Requests {
- l = len(s)
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- l = m.DeviceConfiguration.Size()
- n += 1 + l + sovGenerated(uint64(l))
- return n
-}
-
-func (m *DeviceAllocationResult) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Results) > 0 {
- for _, e := range m.Results {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- if len(m.Config) > 0 {
- for _, e := range m.Config {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- return n
-}
-
-func (m *DeviceAttribute) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.IntValue != nil {
- n += 1 + sovGenerated(uint64(*m.IntValue))
- }
- if m.BoolValue != nil {
- n += 2
- }
- if m.StringValue != nil {
- l = len(*m.StringValue)
- n += 1 + l + sovGenerated(uint64(l))
- }
- if m.VersionValue != nil {
- l = len(*m.VersionValue)
- n += 1 + l + sovGenerated(uint64(l))
- }
- return n
-}
-
-func (m *DeviceClaim) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Requests) > 0 {
- for _, e := range m.Requests {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- if len(m.Constraints) > 0 {
- for _, e := range m.Constraints {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- if len(m.Config) > 0 {
- for _, e := range m.Config {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- return n
-}
-
-func (m *DeviceClaimConfiguration) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Requests) > 0 {
- for _, s := range m.Requests {
- l = len(s)
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- l = m.DeviceConfiguration.Size()
- n += 1 + l + sovGenerated(uint64(l))
- return n
-}
-
-func (m *DeviceClass) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.ObjectMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- l = m.Spec.Size()
- n += 1 + l + sovGenerated(uint64(l))
- return n
-}
-
-func (m *DeviceClassConfiguration) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.DeviceConfiguration.Size()
- n += 1 + l + sovGenerated(uint64(l))
- return n
-}
-
-func (m *DeviceClassList) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.ListMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.Items) > 0 {
- for _, e := range m.Items {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- return n
-}
-
-func (m *DeviceClassSpec) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Selectors) > 0 {
- for _, e := range m.Selectors {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- if len(m.Config) > 0 {
- for _, e := range m.Config {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- return n
-}
-
-func (m *DeviceConfiguration) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Opaque != nil {
- l = m.Opaque.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- return n
-}
-
-func (m *DeviceConstraint) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Requests) > 0 {
- for _, s := range m.Requests {
- l = len(s)
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- if m.MatchAttribute != nil {
- l = len(*m.MatchAttribute)
- n += 1 + l + sovGenerated(uint64(l))
- }
- return n
-}
-
-func (m *DeviceCounterConsumption) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.CounterSet)
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.Counters) > 0 {
- for k, v := range m.Counters {
- _ = k
- _ = v
- l = v.Size()
- mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
- n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
- }
- }
- return n
-}
-
-func (m *DeviceRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.DeviceClassName)
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.Selectors) > 0 {
- for _, e := range m.Selectors {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- l = len(m.AllocationMode)
- n += 1 + l + sovGenerated(uint64(l))
- n += 1 + sovGenerated(uint64(m.Count))
- if m.AdminAccess != nil {
- n += 2
- }
- if len(m.FirstAvailable) > 0 {
- for _, e := range m.FirstAvailable {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- if len(m.Tolerations) > 0 {
- for _, e := range m.Tolerations {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- return n
-}
-
-func (m *DeviceRequestAllocationResult) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Request)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Driver)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Pool)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Device)
- n += 1 + l + sovGenerated(uint64(l))
- if m.AdminAccess != nil {
- n += 2
- }
- if len(m.Tolerations) > 0 {
- for _, e := range m.Tolerations {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- return n
-}
-
-func (m *DeviceSelector) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.CEL != nil {
- l = m.CEL.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- return n
-}
-
-func (m *DeviceSubRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.DeviceClassName)
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.Selectors) > 0 {
- for _, e := range m.Selectors {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- l = len(m.AllocationMode)
- n += 1 + l + sovGenerated(uint64(l))
- n += 1 + sovGenerated(uint64(m.Count))
- if len(m.Tolerations) > 0 {
- for _, e := range m.Tolerations {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- return n
-}
-
-func (m *DeviceTaint) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Key)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Value)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Effect)
- n += 1 + l + sovGenerated(uint64(l))
- if m.TimeAdded != nil {
- l = m.TimeAdded.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- return n
-}
-
-func (m *DeviceTaintRule) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.ObjectMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- l = m.Spec.Size()
- n += 1 + l + sovGenerated(uint64(l))
- return n
-}
-
-func (m *DeviceTaintRuleList) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.ListMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.Items) > 0 {
- for _, e := range m.Items {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- return n
-}
-
-func (m *DeviceTaintRuleSpec) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.DeviceSelector != nil {
- l = m.DeviceSelector.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- l = m.Taint.Size()
- n += 1 + l + sovGenerated(uint64(l))
- return n
-}
-
-func (m *DeviceTaintSelector) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.DeviceClassName != nil {
- l = len(*m.DeviceClassName)
- n += 1 + l + sovGenerated(uint64(l))
- }
- if m.Driver != nil {
- l = len(*m.Driver)
- n += 1 + l + sovGenerated(uint64(l))
- }
- if m.Pool != nil {
- l = len(*m.Pool)
- n += 1 + l + sovGenerated(uint64(l))
- }
- if m.Device != nil {
- l = len(*m.Device)
- n += 1 + l + sovGenerated(uint64(l))
- }
- if len(m.Selectors) > 0 {
- for _, e := range m.Selectors {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- return n
-}
-
-func (m *DeviceToleration) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Key)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Operator)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Value)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Effect)
- n += 1 + l + sovGenerated(uint64(l))
- if m.TolerationSeconds != nil {
- n += 1 + sovGenerated(uint64(*m.TolerationSeconds))
- }
- return n
-}
-
-func (m *NetworkDeviceData) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.InterfaceName)
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.IPs) > 0 {
- for _, s := range m.IPs {
- l = len(s)
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- l = len(m.HardwareAddress)
- n += 1 + l + sovGenerated(uint64(l))
- return n
-}
-
-func (m *OpaqueDeviceConfiguration) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Driver)
- n += 1 + l + sovGenerated(uint64(l))
- l = m.Parameters.Size()
- n += 1 + l + sovGenerated(uint64(l))
- return n
-}
-
-func (m *ResourceClaim) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.ObjectMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- l = m.Spec.Size()
- n += 1 + l + sovGenerated(uint64(l))
- l = m.Status.Size()
- n += 1 + l + sovGenerated(uint64(l))
- return n
-}
-
-func (m *ResourceClaimConsumerReference) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.APIGroup)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Resource)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Name)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.UID)
- n += 1 + l + sovGenerated(uint64(l))
- return n
-}
-
-func (m *ResourceClaimList) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.ListMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.Items) > 0 {
- for _, e := range m.Items {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- return n
-}
-
-func (m *ResourceClaimSpec) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.Devices.Size()
- n += 1 + l + sovGenerated(uint64(l))
- return n
-}
-
-func (m *ResourceClaimStatus) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Allocation != nil {
- l = m.Allocation.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- if len(m.ReservedFor) > 0 {
- for _, e := range m.ReservedFor {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- if len(m.Devices) > 0 {
- for _, e := range m.Devices {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- return n
-}
-
-func (m *ResourceClaimTemplate) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.ObjectMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- l = m.Spec.Size()
- n += 1 + l + sovGenerated(uint64(l))
- return n
-}
-
-func (m *ResourceClaimTemplateList) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.ListMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.Items) > 0 {
- for _, e := range m.Items {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- return n
-}
-
-func (m *ResourceClaimTemplateSpec) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.ObjectMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- l = m.Spec.Size()
- n += 1 + l + sovGenerated(uint64(l))
- return n
-}
-
-func (m *ResourcePool) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- n += 1 + l + sovGenerated(uint64(l))
- n += 1 + sovGenerated(uint64(m.Generation))
- n += 1 + sovGenerated(uint64(m.ResourceSliceCount))
- return n
-}
-
-func (m *ResourceSlice) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.ObjectMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- l = m.Spec.Size()
- n += 1 + l + sovGenerated(uint64(l))
- return n
-}
-
-func (m *ResourceSliceList) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.ListMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.Items) > 0 {
- for _, e := range m.Items {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- return n
-}
-
-func (m *ResourceSliceSpec) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Driver)
- n += 1 + l + sovGenerated(uint64(l))
- l = m.Pool.Size()
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.NodeName)
- n += 1 + l + sovGenerated(uint64(l))
- if m.NodeSelector != nil {
- l = m.NodeSelector.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- n += 2
- if len(m.Devices) > 0 {
- for _, e := range m.Devices {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- if m.PerDeviceNodeSelection != nil {
- n += 2
- }
- if len(m.SharedCounters) > 0 {
- for _, e := range m.SharedCounters {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- return n
-}
-
-func sovGenerated(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozGenerated(x uint64) (n int) {
- return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (this *AllocatedDeviceStatus) String() string {
- if this == nil {
- return "nil"
- }
- repeatedStringForConditions := "[]Condition{"
- for _, f := range this.Conditions {
- repeatedStringForConditions += fmt.Sprintf("%v", f) + ","
- }
- repeatedStringForConditions += "}"
- s := strings.Join([]string{`&AllocatedDeviceStatus{`,
- `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`,
- `Pool:` + fmt.Sprintf("%v", this.Pool) + `,`,
- `Device:` + fmt.Sprintf("%v", this.Device) + `,`,
- `Conditions:` + repeatedStringForConditions + `,`,
- `Data:` + strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1) + `,`,
- `NetworkData:` + strings.Replace(this.NetworkData.String(), "NetworkDeviceData", "NetworkDeviceData", 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *AllocationResult) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&AllocationResult{`,
- `Devices:` + strings.Replace(strings.Replace(this.Devices.String(), "DeviceAllocationResult", "DeviceAllocationResult", 1), `&`, ``, 1) + `,`,
- `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *BasicDevice) String() string {
- if this == nil {
- return "nil"
- }
- repeatedStringForConsumesCounters := "[]DeviceCounterConsumption{"
- for _, f := range this.ConsumesCounters {
- repeatedStringForConsumesCounters += strings.Replace(strings.Replace(f.String(), "DeviceCounterConsumption", "DeviceCounterConsumption", 1), `&`, ``, 1) + ","
- }
- repeatedStringForConsumesCounters += "}"
- repeatedStringForTaints := "[]DeviceTaint{"
- for _, f := range this.Taints {
- repeatedStringForTaints += strings.Replace(strings.Replace(f.String(), "DeviceTaint", "DeviceTaint", 1), `&`, ``, 1) + ","
- }
- repeatedStringForTaints += "}"
- keysForAttributes := make([]string, 0, len(this.Attributes))
- for k := range this.Attributes {
- keysForAttributes = append(keysForAttributes, string(k))
- }
- github_com_gogo_protobuf_sortkeys.Strings(keysForAttributes)
- mapStringForAttributes := "map[QualifiedName]DeviceAttribute{"
- for _, k := range keysForAttributes {
- mapStringForAttributes += fmt.Sprintf("%v: %v,", k, this.Attributes[QualifiedName(k)])
- }
- mapStringForAttributes += "}"
- keysForCapacity := make([]string, 0, len(this.Capacity))
- for k := range this.Capacity {
- keysForCapacity = append(keysForCapacity, string(k))
- }
- github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity)
- mapStringForCapacity := "map[QualifiedName]resource.Quantity{"
- for _, k := range keysForCapacity {
- mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[QualifiedName(k)])
- }
- mapStringForCapacity += "}"
- s := strings.Join([]string{`&BasicDevice{`,
- `Attributes:` + mapStringForAttributes + `,`,
- `Capacity:` + mapStringForCapacity + `,`,
- `ConsumesCounters:` + repeatedStringForConsumesCounters + `,`,
- `NodeName:` + valueToStringGenerated(this.NodeName) + `,`,
- `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`,
- `AllNodes:` + valueToStringGenerated(this.AllNodes) + `,`,
- `Taints:` + repeatedStringForTaints + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *CELDeviceSelector) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&CELDeviceSelector{`,
- `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *Counter) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&Counter{`,
- `Value:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *CounterSet) String() string {
- if this == nil {
- return "nil"
- }
- keysForCounters := make([]string, 0, len(this.Counters))
- for k := range this.Counters {
- keysForCounters = append(keysForCounters, k)
- }
- github_com_gogo_protobuf_sortkeys.Strings(keysForCounters)
- mapStringForCounters := "map[string]Counter{"
- for _, k := range keysForCounters {
- mapStringForCounters += fmt.Sprintf("%v: %v,", k, this.Counters[k])
- }
- mapStringForCounters += "}"
- s := strings.Join([]string{`&CounterSet{`,
- `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
- `Counters:` + mapStringForCounters + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *Device) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&Device{`,
- `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
- `Basic:` + strings.Replace(this.Basic.String(), "BasicDevice", "BasicDevice", 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *DeviceAllocationConfiguration) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&DeviceAllocationConfiguration{`,
- `Source:` + fmt.Sprintf("%v", this.Source) + `,`,
- `Requests:` + fmt.Sprintf("%v", this.Requests) + `,`,
- `DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *DeviceAllocationResult) String() string {
- if this == nil {
- return "nil"
- }
- repeatedStringForResults := "[]DeviceRequestAllocationResult{"
- for _, f := range this.Results {
- repeatedStringForResults += strings.Replace(strings.Replace(f.String(), "DeviceRequestAllocationResult", "DeviceRequestAllocationResult", 1), `&`, ``, 1) + ","
- }
- repeatedStringForResults += "}"
- repeatedStringForConfig := "[]DeviceAllocationConfiguration{"
- for _, f := range this.Config {
- repeatedStringForConfig += strings.Replace(strings.Replace(f.String(), "DeviceAllocationConfiguration", "DeviceAllocationConfiguration", 1), `&`, ``, 1) + ","
- }
- repeatedStringForConfig += "}"
- s := strings.Join([]string{`&DeviceAllocationResult{`,
- `Results:` + repeatedStringForResults + `,`,
- `Config:` + repeatedStringForConfig + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *DeviceAttribute) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&DeviceAttribute{`,
- `IntValue:` + valueToStringGenerated(this.IntValue) + `,`,
- `BoolValue:` + valueToStringGenerated(this.BoolValue) + `,`,
- `StringValue:` + valueToStringGenerated(this.StringValue) + `,`,
- `VersionValue:` + valueToStringGenerated(this.VersionValue) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *DeviceClaim) String() string {
- if this == nil {
- return "nil"
- }
- repeatedStringForRequests := "[]DeviceRequest{"
- for _, f := range this.Requests {
- repeatedStringForRequests += strings.Replace(strings.Replace(f.String(), "DeviceRequest", "DeviceRequest", 1), `&`, ``, 1) + ","
- }
- repeatedStringForRequests += "}"
- repeatedStringForConstraints := "[]DeviceConstraint{"
- for _, f := range this.Constraints {
- repeatedStringForConstraints += strings.Replace(strings.Replace(f.String(), "DeviceConstraint", "DeviceConstraint", 1), `&`, ``, 1) + ","
- }
- repeatedStringForConstraints += "}"
- repeatedStringForConfig := "[]DeviceClaimConfiguration{"
- for _, f := range this.Config {
- repeatedStringForConfig += strings.Replace(strings.Replace(f.String(), "DeviceClaimConfiguration", "DeviceClaimConfiguration", 1), `&`, ``, 1) + ","
- }
- repeatedStringForConfig += "}"
- s := strings.Join([]string{`&DeviceClaim{`,
- `Requests:` + repeatedStringForRequests + `,`,
- `Constraints:` + repeatedStringForConstraints + `,`,
- `Config:` + repeatedStringForConfig + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *DeviceClaimConfiguration) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&DeviceClaimConfiguration{`,
- `Requests:` + fmt.Sprintf("%v", this.Requests) + `,`,
- `DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *DeviceClass) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&DeviceClass{`,
- `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
- `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeviceClassSpec", "DeviceClassSpec", 1), `&`, ``, 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *DeviceClassConfiguration) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&DeviceClassConfiguration{`,
- `DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *DeviceClassList) String() string {
- if this == nil {
- return "nil"
- }
- repeatedStringForItems := "[]DeviceClass{"
- for _, f := range this.Items {
- repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DeviceClass", "DeviceClass", 1), `&`, ``, 1) + ","
- }
- repeatedStringForItems += "}"
- s := strings.Join([]string{`&DeviceClassList{`,
- `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
- `Items:` + repeatedStringForItems + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *DeviceClassSpec) String() string {
- if this == nil {
- return "nil"
- }
- repeatedStringForSelectors := "[]DeviceSelector{"
- for _, f := range this.Selectors {
- repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + ","
- }
- repeatedStringForSelectors += "}"
- repeatedStringForConfig := "[]DeviceClassConfiguration{"
- for _, f := range this.Config {
- repeatedStringForConfig += strings.Replace(strings.Replace(f.String(), "DeviceClassConfiguration", "DeviceClassConfiguration", 1), `&`, ``, 1) + ","
- }
- repeatedStringForConfig += "}"
- s := strings.Join([]string{`&DeviceClassSpec{`,
- `Selectors:` + repeatedStringForSelectors + `,`,
- `Config:` + repeatedStringForConfig + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *DeviceConfiguration) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&DeviceConfiguration{`,
- `Opaque:` + strings.Replace(this.Opaque.String(), "OpaqueDeviceConfiguration", "OpaqueDeviceConfiguration", 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *DeviceConstraint) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&DeviceConstraint{`,
- `Requests:` + fmt.Sprintf("%v", this.Requests) + `,`,
- `MatchAttribute:` + valueToStringGenerated(this.MatchAttribute) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *DeviceCounterConsumption) String() string {
- if this == nil {
- return "nil"
- }
- keysForCounters := make([]string, 0, len(this.Counters))
- for k := range this.Counters {
- keysForCounters = append(keysForCounters, k)
- }
- github_com_gogo_protobuf_sortkeys.Strings(keysForCounters)
- mapStringForCounters := "map[string]Counter{"
- for _, k := range keysForCounters {
- mapStringForCounters += fmt.Sprintf("%v: %v,", k, this.Counters[k])
- }
- mapStringForCounters += "}"
- s := strings.Join([]string{`&DeviceCounterConsumption{`,
- `CounterSet:` + fmt.Sprintf("%v", this.CounterSet) + `,`,
- `Counters:` + mapStringForCounters + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *DeviceRequest) String() string {
- if this == nil {
- return "nil"
- }
- repeatedStringForSelectors := "[]DeviceSelector{"
- for _, f := range this.Selectors {
- repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + ","
- }
- repeatedStringForSelectors += "}"
- repeatedStringForFirstAvailable := "[]DeviceSubRequest{"
- for _, f := range this.FirstAvailable {
- repeatedStringForFirstAvailable += strings.Replace(strings.Replace(f.String(), "DeviceSubRequest", "DeviceSubRequest", 1), `&`, ``, 1) + ","
- }
- repeatedStringForFirstAvailable += "}"
- repeatedStringForTolerations := "[]DeviceToleration{"
- for _, f := range this.Tolerations {
- repeatedStringForTolerations += strings.Replace(strings.Replace(f.String(), "DeviceToleration", "DeviceToleration", 1), `&`, ``, 1) + ","
- }
- repeatedStringForTolerations += "}"
- s := strings.Join([]string{`&DeviceRequest{`,
- `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
- `DeviceClassName:` + fmt.Sprintf("%v", this.DeviceClassName) + `,`,
- `Selectors:` + repeatedStringForSelectors + `,`,
- `AllocationMode:` + fmt.Sprintf("%v", this.AllocationMode) + `,`,
- `Count:` + fmt.Sprintf("%v", this.Count) + `,`,
- `AdminAccess:` + valueToStringGenerated(this.AdminAccess) + `,`,
- `FirstAvailable:` + repeatedStringForFirstAvailable + `,`,
- `Tolerations:` + repeatedStringForTolerations + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *DeviceRequestAllocationResult) String() string {
- if this == nil {
- return "nil"
- }
- repeatedStringForTolerations := "[]DeviceToleration{"
- for _, f := range this.Tolerations {
- repeatedStringForTolerations += strings.Replace(strings.Replace(f.String(), "DeviceToleration", "DeviceToleration", 1), `&`, ``, 1) + ","
- }
- repeatedStringForTolerations += "}"
- s := strings.Join([]string{`&DeviceRequestAllocationResult{`,
- `Request:` + fmt.Sprintf("%v", this.Request) + `,`,
- `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`,
- `Pool:` + fmt.Sprintf("%v", this.Pool) + `,`,
- `Device:` + fmt.Sprintf("%v", this.Device) + `,`,
- `AdminAccess:` + valueToStringGenerated(this.AdminAccess) + `,`,
- `Tolerations:` + repeatedStringForTolerations + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *DeviceSelector) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&DeviceSelector{`,
- `CEL:` + strings.Replace(this.CEL.String(), "CELDeviceSelector", "CELDeviceSelector", 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *DeviceSubRequest) String() string {
- if this == nil {
- return "nil"
- }
- repeatedStringForSelectors := "[]DeviceSelector{"
- for _, f := range this.Selectors {
- repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + ","
- }
- repeatedStringForSelectors += "}"
- repeatedStringForTolerations := "[]DeviceToleration{"
- for _, f := range this.Tolerations {
- repeatedStringForTolerations += strings.Replace(strings.Replace(f.String(), "DeviceToleration", "DeviceToleration", 1), `&`, ``, 1) + ","
- }
- repeatedStringForTolerations += "}"
- s := strings.Join([]string{`&DeviceSubRequest{`,
- `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
- `DeviceClassName:` + fmt.Sprintf("%v", this.DeviceClassName) + `,`,
- `Selectors:` + repeatedStringForSelectors + `,`,
- `AllocationMode:` + fmt.Sprintf("%v", this.AllocationMode) + `,`,
- `Count:` + fmt.Sprintf("%v", this.Count) + `,`,
- `Tolerations:` + repeatedStringForTolerations + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *DeviceTaint) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&DeviceTaint{`,
- `Key:` + fmt.Sprintf("%v", this.Key) + `,`,
- `Value:` + fmt.Sprintf("%v", this.Value) + `,`,
- `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`,
- `TimeAdded:` + strings.Replace(fmt.Sprintf("%v", this.TimeAdded), "Time", "v1.Time", 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *DeviceTaintRule) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&DeviceTaintRule{`,
- `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
- `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeviceTaintRuleSpec", "DeviceTaintRuleSpec", 1), `&`, ``, 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *DeviceTaintRuleList) String() string {
- if this == nil {
- return "nil"
- }
- repeatedStringForItems := "[]DeviceTaintRule{"
- for _, f := range this.Items {
- repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DeviceTaintRule", "DeviceTaintRule", 1), `&`, ``, 1) + ","
- }
- repeatedStringForItems += "}"
- s := strings.Join([]string{`&DeviceTaintRuleList{`,
- `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
- `Items:` + repeatedStringForItems + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *DeviceTaintRuleSpec) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&DeviceTaintRuleSpec{`,
- `DeviceSelector:` + strings.Replace(this.DeviceSelector.String(), "DeviceTaintSelector", "DeviceTaintSelector", 1) + `,`,
- `Taint:` + strings.Replace(strings.Replace(this.Taint.String(), "DeviceTaint", "DeviceTaint", 1), `&`, ``, 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *DeviceTaintSelector) String() string {
- if this == nil {
- return "nil"
- }
- repeatedStringForSelectors := "[]DeviceSelector{"
- for _, f := range this.Selectors {
- repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + ","
- }
- repeatedStringForSelectors += "}"
- s := strings.Join([]string{`&DeviceTaintSelector{`,
- `DeviceClassName:` + valueToStringGenerated(this.DeviceClassName) + `,`,
- `Driver:` + valueToStringGenerated(this.Driver) + `,`,
- `Pool:` + valueToStringGenerated(this.Pool) + `,`,
- `Device:` + valueToStringGenerated(this.Device) + `,`,
- `Selectors:` + repeatedStringForSelectors + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *DeviceToleration) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&DeviceToleration{`,
- `Key:` + fmt.Sprintf("%v", this.Key) + `,`,
- `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`,
- `Value:` + fmt.Sprintf("%v", this.Value) + `,`,
- `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`,
- `TolerationSeconds:` + valueToStringGenerated(this.TolerationSeconds) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *NetworkDeviceData) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&NetworkDeviceData{`,
- `InterfaceName:` + fmt.Sprintf("%v", this.InterfaceName) + `,`,
- `IPs:` + fmt.Sprintf("%v", this.IPs) + `,`,
- `HardwareAddress:` + fmt.Sprintf("%v", this.HardwareAddress) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *OpaqueDeviceConfiguration) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&OpaqueDeviceConfiguration{`,
- `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`,
- `Parameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Parameters), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *ResourceClaim) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&ResourceClaim{`,
- `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
- `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimSpec", "ResourceClaimSpec", 1), `&`, ``, 1) + `,`,
- `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ResourceClaimStatus", "ResourceClaimStatus", 1), `&`, ``, 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *ResourceClaimConsumerReference) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&ResourceClaimConsumerReference{`,
- `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`,
- `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`,
- `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
- `UID:` + fmt.Sprintf("%v", this.UID) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *ResourceClaimList) String() string {
- if this == nil {
- return "nil"
- }
- repeatedStringForItems := "[]ResourceClaim{"
- for _, f := range this.Items {
- repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClaim", "ResourceClaim", 1), `&`, ``, 1) + ","
- }
- repeatedStringForItems += "}"
- s := strings.Join([]string{`&ResourceClaimList{`,
- `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
- `Items:` + repeatedStringForItems + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *ResourceClaimSpec) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&ResourceClaimSpec{`,
- `Devices:` + strings.Replace(strings.Replace(this.Devices.String(), "DeviceClaim", "DeviceClaim", 1), `&`, ``, 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *ResourceClaimStatus) String() string {
- if this == nil {
- return "nil"
- }
- repeatedStringForReservedFor := "[]ResourceClaimConsumerReference{"
- for _, f := range this.ReservedFor {
- repeatedStringForReservedFor += strings.Replace(strings.Replace(f.String(), "ResourceClaimConsumerReference", "ResourceClaimConsumerReference", 1), `&`, ``, 1) + ","
- }
- repeatedStringForReservedFor += "}"
- repeatedStringForDevices := "[]AllocatedDeviceStatus{"
- for _, f := range this.Devices {
- repeatedStringForDevices += strings.Replace(strings.Replace(f.String(), "AllocatedDeviceStatus", "AllocatedDeviceStatus", 1), `&`, ``, 1) + ","
- }
- repeatedStringForDevices += "}"
- s := strings.Join([]string{`&ResourceClaimStatus{`,
- `Allocation:` + strings.Replace(this.Allocation.String(), "AllocationResult", "AllocationResult", 1) + `,`,
- `ReservedFor:` + repeatedStringForReservedFor + `,`,
- `Devices:` + repeatedStringForDevices + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *ResourceClaimTemplate) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&ResourceClaimTemplate{`,
- `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
- `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimTemplateSpec", "ResourceClaimTemplateSpec", 1), `&`, ``, 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *ResourceClaimTemplateList) String() string {
- if this == nil {
- return "nil"
- }
- repeatedStringForItems := "[]ResourceClaimTemplate{"
- for _, f := range this.Items {
- repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClaimTemplate", "ResourceClaimTemplate", 1), `&`, ``, 1) + ","
- }
- repeatedStringForItems += "}"
- s := strings.Join([]string{`&ResourceClaimTemplateList{`,
- `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
- `Items:` + repeatedStringForItems + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *ResourceClaimTemplateSpec) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&ResourceClaimTemplateSpec{`,
- `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
- `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimSpec", "ResourceClaimSpec", 1), `&`, ``, 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *ResourcePool) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&ResourcePool{`,
- `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
- `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`,
- `ResourceSliceCount:` + fmt.Sprintf("%v", this.ResourceSliceCount) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *ResourceSlice) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&ResourceSlice{`,
- `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
- `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceSliceSpec", "ResourceSliceSpec", 1), `&`, ``, 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *ResourceSliceList) String() string {
- if this == nil {
- return "nil"
- }
- repeatedStringForItems := "[]ResourceSlice{"
- for _, f := range this.Items {
- repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceSlice", "ResourceSlice", 1), `&`, ``, 1) + ","
- }
- repeatedStringForItems += "}"
- s := strings.Join([]string{`&ResourceSliceList{`,
- `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
- `Items:` + repeatedStringForItems + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *ResourceSliceSpec) String() string {
- if this == nil {
- return "nil"
- }
- repeatedStringForDevices := "[]Device{"
- for _, f := range this.Devices {
- repeatedStringForDevices += strings.Replace(strings.Replace(f.String(), "Device", "Device", 1), `&`, ``, 1) + ","
- }
- repeatedStringForDevices += "}"
- repeatedStringForSharedCounters := "[]CounterSet{"
- for _, f := range this.SharedCounters {
- repeatedStringForSharedCounters += strings.Replace(strings.Replace(f.String(), "CounterSet", "CounterSet", 1), `&`, ``, 1) + ","
- }
- repeatedStringForSharedCounters += "}"
- s := strings.Join([]string{`&ResourceSliceSpec{`,
- `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`,
- `Pool:` + strings.Replace(strings.Replace(this.Pool.String(), "ResourcePool", "ResourcePool", 1), `&`, ``, 1) + `,`,
- `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`,
- `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`,
- `AllNodes:` + fmt.Sprintf("%v", this.AllNodes) + `,`,
- `Devices:` + repeatedStringForDevices + `,`,
- `PerDeviceNodeSelection:` + valueToStringGenerated(this.PerDeviceNodeSelection) + `,`,
- `SharedCounters:` + repeatedStringForSharedCounters + `,`,
- `}`,
- }, "")
- return s
-}
-func valueToStringGenerated(v interface{}) string {
- rv := reflect.ValueOf(v)
- if rv.IsNil() {
- return "nil"
- }
- pv := reflect.Indirect(rv).Interface()
- return fmt.Sprintf("*%v", pv)
-}
-func (m *AllocatedDeviceStatus) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AllocatedDeviceStatus: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AllocatedDeviceStatus: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Driver = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Pool = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Device = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Conditions = append(m.Conditions, v1.Condition{})
- if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Data == nil {
- m.Data = &runtime.RawExtension{}
- }
- if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field NetworkData", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.NetworkData == nil {
- m.NetworkData = &NetworkDeviceData{}
- }
- if err := m.NetworkData.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AllocationResult) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AllocationResult: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AllocationResult: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Devices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.NodeSelector == nil {
- m.NodeSelector = &v11.NodeSelector{}
- }
- if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *BasicDevice) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: BasicDevice: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: BasicDevice: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Attributes == nil {
- m.Attributes = make(map[QualifiedName]DeviceAttribute)
- }
- var mapkey QualifiedName
- mapvalue := &DeviceAttribute{}
- for iNdEx < postIndex {
- entryPreIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- if fieldNum == 1 {
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
- } else if fieldNum == 2 {
- var mapmsglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- mapmsglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postmsgIndex := iNdEx + mapmsglen
- if postmsgIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postmsgIndex > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue = &DeviceAttribute{}
- if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
- return err
- }
- iNdEx = postmsgIndex
- } else {
- iNdEx = entryPreIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > postIndex {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
- m.Attributes[QualifiedName(mapkey)] = *mapvalue
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Capacity == nil {
- m.Capacity = make(map[QualifiedName]resource.Quantity)
- }
- var mapkey QualifiedName
- mapvalue := &resource.Quantity{}
- for iNdEx < postIndex {
- entryPreIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- if fieldNum == 1 {
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
- } else if fieldNum == 2 {
- var mapmsglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- mapmsglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postmsgIndex := iNdEx + mapmsglen
- if postmsgIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postmsgIndex > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue = &resource.Quantity{}
- if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
- return err
- }
- iNdEx = postmsgIndex
- } else {
- iNdEx = entryPreIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > postIndex {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
- m.Capacity[QualifiedName(mapkey)] = *mapvalue
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ConsumesCounters", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ConsumesCounters = append(m.ConsumesCounters, DeviceCounterConsumption{})
- if err := m.ConsumesCounters[len(m.ConsumesCounters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- s := string(dAtA[iNdEx:postIndex])
- m.NodeName = &s
- iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.NodeSelector == nil {
- m.NodeSelector = &v11.NodeSelector{}
- }
- if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 6:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field AllNodes", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- b := bool(v != 0)
- m.AllNodes = &b
- case 7:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Taints", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Taints = append(m.Taints, DeviceTaint{})
- if err := m.Taints[len(m.Taints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *CELDeviceSelector) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: CELDeviceSelector: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: CELDeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Expression = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Counter) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Counter: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Counter: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *CounterSet) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: CounterSet: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: CounterSet: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Counters", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Counters == nil {
- m.Counters = make(map[string]Counter)
- }
- var mapkey string
- mapvalue := &Counter{}
- for iNdEx < postIndex {
- entryPreIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- if fieldNum == 1 {
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
- } else if fieldNum == 2 {
- var mapmsglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- mapmsglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postmsgIndex := iNdEx + mapmsglen
- if postmsgIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postmsgIndex > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue = &Counter{}
- if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
- return err
- }
- iNdEx = postmsgIndex
- } else {
- iNdEx = entryPreIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > postIndex {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
- m.Counters[mapkey] = *mapvalue
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Device) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Device: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Device: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Basic", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Basic == nil {
- m.Basic = &BasicDevice{}
- }
- if err := m.Basic.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DeviceAllocationConfiguration: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceAllocationConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Source = AllocationConfigSource(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DeviceAllocationResult) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DeviceAllocationResult: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Results = append(m.Results, DeviceRequestAllocationResult{})
- if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Config = append(m.Config, DeviceAllocationConfiguration{})
- if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DeviceAttribute) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DeviceAttribute: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceAttribute: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType)
- }
- var v int64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.IntValue = &v
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- b := bool(v != 0)
- m.BoolValue = &b
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- s := string(dAtA[iNdEx:postIndex])
- m.StringValue = &s
- iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field VersionValue", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- s := string(dAtA[iNdEx:postIndex])
- m.VersionValue = &s
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DeviceClaim) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DeviceClaim: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceClaim: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Requests = append(m.Requests, DeviceRequest{})
- if err := m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Constraints = append(m.Constraints, DeviceConstraint{})
- if err := m.Constraints[len(m.Constraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Config = append(m.Config, DeviceClaimConfiguration{})
- if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DeviceClaimConfiguration) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DeviceClaimConfiguration: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceClaimConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DeviceClass) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DeviceClass: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceClass: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DeviceClassConfiguration) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DeviceClassConfiguration: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceClassConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DeviceClassList) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DeviceClassList: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceClassList: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Items = append(m.Items, DeviceClass{})
- if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DeviceClassSpec: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceClassSpec: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Selectors = append(m.Selectors, DeviceSelector{})
- if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Config = append(m.Config, DeviceClassConfiguration{})
- if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DeviceConfiguration) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DeviceConfiguration: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Opaque", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Opaque == nil {
- m.Opaque = &OpaqueDeviceConfiguration{}
- }
- if err := m.Opaque.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DeviceConstraint) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DeviceConstraint: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceConstraint: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field MatchAttribute", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- s := FullyQualifiedName(dAtA[iNdEx:postIndex])
- m.MatchAttribute = &s
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DeviceCounterConsumption) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DeviceCounterConsumption: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceCounterConsumption: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field CounterSet", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.CounterSet = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Counters", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Counters == nil {
- m.Counters = make(map[string]Counter)
- }
- var mapkey string
- mapvalue := &Counter{}
- for iNdEx < postIndex {
- entryPreIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- if fieldNum == 1 {
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
- } else if fieldNum == 2 {
- var mapmsglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- mapmsglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postmsgIndex := iNdEx + mapmsglen
- if postmsgIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postmsgIndex > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue = &Counter{}
- if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
- return err
- }
- iNdEx = postmsgIndex
- } else {
- iNdEx = entryPreIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > postIndex {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
- m.Counters[mapkey] = *mapvalue
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DeviceRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DeviceRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DeviceClassName", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.DeviceClassName = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Selectors = append(m.Selectors, DeviceSelector{})
- if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AllocationMode", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.AllocationMode = DeviceAllocationMode(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
- }
- m.Count = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Count |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 6:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- b := bool(v != 0)
- m.AdminAccess = &b
- case 7:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field FirstAvailable", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.FirstAvailable = append(m.FirstAvailable, DeviceSubRequest{})
- if err := m.FirstAvailable[len(m.FirstAvailable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 8:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Tolerations = append(m.Tolerations, DeviceToleration{})
- if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DeviceRequestAllocationResult: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceRequestAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Request = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Driver = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Pool = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Device = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- b := bool(v != 0)
- m.AdminAccess = &b
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Tolerations = append(m.Tolerations, DeviceToleration{})
- if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DeviceSelector) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DeviceSelector: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field CEL", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.CEL == nil {
- m.CEL = &CELDeviceSelector{}
- }
- if err := m.CEL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DeviceSubRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DeviceSubRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceSubRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DeviceClassName", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.DeviceClassName = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Selectors = append(m.Selectors, DeviceSelector{})
- if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AllocationMode", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.AllocationMode = DeviceAllocationMode(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
- }
- m.Count = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Count |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 7:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Tolerations = append(m.Tolerations, DeviceToleration{})
- if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DeviceTaint) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DeviceTaint: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceTaint: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Key = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Value = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Effect = DeviceTaintEffect(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field TimeAdded", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.TimeAdded == nil {
- m.TimeAdded = &v1.Time{}
- }
- if err := m.TimeAdded.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DeviceTaintRule) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DeviceTaintRule: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceTaintRule: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DeviceTaintRuleList) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DeviceTaintRuleList: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceTaintRuleList: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Items = append(m.Items, DeviceTaintRule{})
- if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DeviceTaintRuleSpec) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DeviceTaintRuleSpec: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceTaintRuleSpec: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DeviceSelector", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.DeviceSelector == nil {
- m.DeviceSelector = &DeviceTaintSelector{}
- }
- if err := m.DeviceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Taint", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Taint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DeviceTaintSelector) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DeviceTaintSelector: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceTaintSelector: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DeviceClassName", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- s := string(dAtA[iNdEx:postIndex])
- m.DeviceClassName = &s
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- s := string(dAtA[iNdEx:postIndex])
- m.Driver = &s
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- s := string(dAtA[iNdEx:postIndex])
- m.Pool = &s
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- s := string(dAtA[iNdEx:postIndex])
- m.Device = &s
- iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Selectors = append(m.Selectors, DeviceSelector{})
- if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DeviceToleration) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DeviceToleration: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceToleration: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Key = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Operator = DeviceTolerationOperator(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Value = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Effect = DeviceTaintEffect(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TolerationSeconds", wireType)
- }
- var v int64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.TolerationSeconds = &v
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *NetworkDeviceData) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: NetworkDeviceData: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: NetworkDeviceData: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field InterfaceName", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.InterfaceName = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field IPs", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.IPs = append(m.IPs, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field HardwareAddress", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.HardwareAddress = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *OpaqueDeviceConfiguration) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: OpaqueDeviceConfiguration: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: OpaqueDeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Driver = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ResourceClaim) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ResourceClaim: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ResourceClaim: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ResourceClaimConsumerReference) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ResourceClaimConsumerReference: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ResourceClaimConsumerReference: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.APIGroup = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Resource = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ResourceClaimList) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ResourceClaimList: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ResourceClaimList: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Items = append(m.Items, ResourceClaim{})
- if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ResourceClaimSpec) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ResourceClaimSpec: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ResourceClaimSpec: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Devices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ResourceClaimStatus) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ResourceClaimStatus: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ResourceClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Allocation", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Allocation == nil {
- m.Allocation = &AllocationResult{}
- }
- if err := m.Allocation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ReservedFor", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ReservedFor = append(m.ReservedFor, ResourceClaimConsumerReference{})
- if err := m.ReservedFor[len(m.ReservedFor)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Devices = append(m.Devices, AllocatedDeviceStatus{})
- if err := m.Devices[len(m.Devices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
+func (this *DeviceTaintRuleList) String() string {
+ if this == nil {
+ return "nil"
}
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
+ repeatedStringForItems := "[]DeviceTaintRule{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DeviceTaintRule", "DeviceTaintRule", 1), `&`, ``, 1) + ","
}
- return nil
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&DeviceTaintRuleList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
}
-func (m *ResourceClaimTemplate) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ResourceClaimTemplate: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ResourceClaimTemplate: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
+func (this *DeviceTaintRuleSpec) String() string {
+ if this == nil {
+ return "nil"
}
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
+ s := strings.Join([]string{`&DeviceTaintRuleSpec{`,
+ `DeviceSelector:` + strings.Replace(this.DeviceSelector.String(), "DeviceTaintSelector", "DeviceTaintSelector", 1) + `,`,
+ `Taint:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Taint), "DeviceTaint", "DeviceTaint", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeviceTaintSelector) String() string {
+ if this == nil {
+ return "nil"
}
- return nil
+ repeatedStringForSelectors := "[]DeviceSelector{"
+ for _, f := range this.Selectors {
+ repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForSelectors += "}"
+ s := strings.Join([]string{`&DeviceTaintSelector{`,
+ `DeviceClassName:` + valueToStringGenerated(this.DeviceClassName) + `,`,
+ `Driver:` + valueToStringGenerated(this.Driver) + `,`,
+ `Pool:` + valueToStringGenerated(this.Pool) + `,`,
+ `Device:` + valueToStringGenerated(this.Device) + `,`,
+ `Selectors:` + repeatedStringForSelectors + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringGenerated(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
}
-func (m *ResourceClaimTemplateList) Unmarshal(dAtA []byte) error {
+func (m *CELDeviceSelector) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -10968,50 +860,17 @@ func (m *ResourceClaimTemplateList) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: ResourceClaimTemplateList: wiretype end group for non-group")
+ return fmt.Errorf("proto: CELDeviceSelector: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: ResourceClaimTemplateList: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: CELDeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -11021,25 +880,23 @@ func (m *ResourceClaimTemplateList) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Items = append(m.Items, ResourceClaimTemplate{})
- if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.Expression = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -11062,7 +919,7 @@ func (m *ResourceClaimTemplateList) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *ResourceClaimTemplateSpec) Unmarshal(dAtA []byte) error {
+func (m *DeviceSelector) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -11075,58 +932,25 @@ func (m *ResourceClaimTemplateSpec) Unmarshal(dAtA []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ResourceClaimTemplateSpec: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ResourceClaimTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeviceSelector: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field CEL", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -11153,7 +977,10 @@ func (m *ResourceClaimTemplateSpec) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if m.CEL == nil {
+ m.CEL = &CELDeviceSelector{}
+ }
+ if err := m.CEL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -11178,7 +1005,7 @@ func (m *ResourceClaimTemplateSpec) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *ResourcePool) Unmarshal(dAtA []byte) error {
+func (m *DeviceTaint) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -11201,15 +1028,15 @@ func (m *ResourcePool) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: ResourcePool: wiretype end group for non-group")
+ return fmt.Errorf("proto: DeviceTaint: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: ResourcePool: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: DeviceTaint: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -11237,13 +1064,13 @@ func (m *ResourcePool) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Name = string(dAtA[iNdEx:postIndex])
+ m.Key = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType)
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
}
- m.Generation = 0
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -11253,16 +1080,61 @@ func (m *ResourcePool) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Generation |= int64(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Value = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ResourceSliceCount", wireType)
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Effect = DeviceTaintEffect(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TimeAdded", wireType)
}
- m.ResourceSliceCount = 0
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -11272,11 +1144,28 @@ func (m *ResourcePool) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.ResourceSliceCount |= int64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.TimeAdded == nil {
+ m.TimeAdded = &v1.Time{}
+ }
+ if err := m.TimeAdded.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -11298,7 +1187,7 @@ func (m *ResourcePool) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *ResourceSlice) Unmarshal(dAtA []byte) error {
+func (m *DeviceTaintRule) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -11321,10 +1210,10 @@ func (m *ResourceSlice) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: ResourceSlice: wiretype end group for non-group")
+ return fmt.Errorf("proto: DeviceTaintRule: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: ResourceSlice: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: DeviceTaintRule: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
@@ -11414,7 +1303,7 @@ func (m *ResourceSlice) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *ResourceSliceList) Unmarshal(dAtA []byte) error {
+func (m *DeviceTaintRuleList) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -11437,10 +1326,10 @@ func (m *ResourceSliceList) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: ResourceSliceList: wiretype end group for non-group")
+ return fmt.Errorf("proto: DeviceTaintRuleList: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: ResourceSliceList: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: DeviceTaintRuleList: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
@@ -11505,7 +1394,7 @@ func (m *ResourceSliceList) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Items = append(m.Items, ResourceSlice{})
+ m.Items = append(m.Items, DeviceTaintRule{})
if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
@@ -11531,7 +1420,7 @@ func (m *ResourceSliceList) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *ResourceSliceSpec) Unmarshal(dAtA []byte) error {
+func (m *DeviceTaintRuleSpec) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -11554,17 +1443,17 @@ func (m *ResourceSliceSpec) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: ResourceSliceSpec: wiretype end group for non-group")
+ return fmt.Errorf("proto: DeviceTaintRuleSpec: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: ResourceSliceSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: DeviceTaintRuleSpec: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field DeviceSelector", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -11574,27 +1463,31 @@ func (m *ResourceSliceSpec) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Driver = string(dAtA[iNdEx:postIndex])
+ if m.DeviceSelector == nil {
+ m.DeviceSelector = &DeviceTaintSelector{}
+ }
+ if err := m.DeviceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Taint", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -11621,13 +1514,63 @@ func (m *ResourceSliceSpec) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.Pool.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.Taint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
- case 3:
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeviceTaintSelector) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeviceTaintSelector: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeviceTaintSelector: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field DeviceClassName", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -11655,13 +1598,14 @@ func (m *ResourceSliceSpec) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.NodeName = string(dAtA[iNdEx:postIndex])
+ s := string(dAtA[iNdEx:postIndex])
+ m.DeviceClassName = &s
iNdEx = postIndex
- case 4:
+ case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -11671,53 +1615,30 @@ func (m *ResourceSliceSpec) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.NodeSelector == nil {
- m.NodeSelector = &v11.NodeSelector{}
- }
- if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Driver = &s
iNdEx = postIndex
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field AllNodes", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.AllNodes = bool(v != 0)
- case 6:
+ case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -11727,31 +1648,30 @@ func (m *ResourceSliceSpec) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Devices = append(m.Devices, Device{})
- if err := m.Devices[len(m.Devices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Pool = &s
iNdEx = postIndex
- case 7:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field PerDeviceNodeSelection", wireType)
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType)
}
- var v int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -11761,16 +1681,28 @@ func (m *ResourceSliceSpec) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- v |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- b := bool(v != 0)
- m.PerDeviceNodeSelection = &b
- case 8:
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Device = &s
+ iNdEx = postIndex
+ case 5:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SharedCounters", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -11797,8 +1729,8 @@ func (m *ResourceSliceSpec) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.SharedCounters = append(m.SharedCounters, CounterSet{})
- if err := m.SharedCounters[len(m.SharedCounters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ m.Selectors = append(m.Selectors, DeviceSelector{})
+ if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
diff --git a/vendor/k8s.io/api/resource/v1alpha3/generated.proto b/vendor/k8s.io/api/resource/v1alpha3/generated.proto
index 103cafc6a..d33447900 100644
--- a/vendor/k8s.io/api/resource/v1alpha3/generated.proto
+++ b/vendor/k8s.io/api/resource/v1alpha3/generated.proto
@@ -21,8 +21,6 @@ syntax = "proto2";
package k8s.io.api.resource.v1alpha3;
-import "k8s.io/api/core/v1/generated.proto";
-import "k8s.io/apimachinery/pkg/api/resource/generated.proto";
import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
@@ -30,149 +28,6 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
// Package-wide variables from generator "generated".
option go_package = "k8s.io/api/resource/v1alpha3";
-// AllocatedDeviceStatus contains the status of an allocated device, if the
-// driver chooses to report it. This may include driver-specific information.
-message AllocatedDeviceStatus {
- // Driver specifies the name of the DRA driver whose kubelet
- // plugin should be invoked to process the allocation once the claim is
- // needed on a node.
- //
- // Must be a DNS subdomain and should end with a DNS domain owned by the
- // vendor of the driver.
- //
- // +required
- optional string driver = 1;
-
- // This name together with the driver name and the device name field
- // identify which device was allocated (`<driver name>/<pool name>/<device name>`).
- //
- // Must not be longer than 253 characters and may contain one or more
- // DNS sub-domains separated by slashes.
- //
- // +required
- optional string pool = 2;
-
- // Device references one device instance via its name in the driver's
- // resource pool. It must be a DNS label.
- //
- // +required
- optional string device = 3;
-
- // Conditions contains the latest observation of the device's state.
- // If the device has been configured according to the class and claim
- // config references, the `Ready` condition should be True.
- //
- // Must not contain more than 8 entries.
- //
- // +optional
- // +listType=map
- // +listMapKey=type
- repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 4;
-
- // Data contains arbitrary driver-specific data.
- //
- // The length of the raw data must be smaller or equal to 10 Ki.
- //
- // +optional
- optional .k8s.io.apimachinery.pkg.runtime.RawExtension data = 5;
-
- // NetworkData contains network-related information specific to the device.
- //
- // +optional
- optional NetworkDeviceData networkData = 6;
-}
-
-// AllocationResult contains attributes of an allocated resource.
-message AllocationResult {
- // Devices is the result of allocating devices.
- //
- // +optional
- optional DeviceAllocationResult devices = 1;
-
- // NodeSelector defines where the allocated resources are available. If
- // unset, they are available everywhere.
- //
- // +optional
- optional .k8s.io.api.core.v1.NodeSelector nodeSelector = 3;
-}
-
-// BasicDevice defines one device instance.
-message BasicDevice {
- // Attributes defines the set of attributes for this device.
- // The name of each attribute must be unique in that set.
- //
- // The maximum number of attributes and capacities combined is 32.
- //
- // +optional
- map<string, DeviceAttribute> attributes = 1;
-
- // Capacity defines the set of capacities for this device.
- // The name of each capacity must be unique in that set.
- //
- // The maximum number of attributes and capacities combined is 32.
- //
- // +optional
- map<string, DeviceCapacity> capacity = 2;
-
- // ConsumesCounters defines a list of references to sharedCounters
- // and the set of counters that the device will
- // consume from those counter sets.
- //
- // There can only be a single entry per counterSet.
- //
- // The total number of device counter consumption entries
- // must be <= 32. In addition, the total number in the
- // entire ResourceSlice must be <= 1024 (for example,
- // 64 devices with 16 counters each).
- //
- // +optional
- // +listType=atomic
- // +featureGate=DRAPartitionableDevices
- repeated DeviceCounterConsumption consumesCounters = 3;
-
- // NodeName identifies the node where the device is available.
- //
- // Must only be set if Spec.PerDeviceNodeSelection is set to true.
- // At most one of NodeName, NodeSelector and AllNodes can be set.
- //
- // +optional
- // +oneOf=DeviceNodeSelection
- // +featureGate=DRAPartitionableDevices
- optional string nodeName = 4;
-
- // NodeSelector defines the nodes where the device is available.
- //
- // Must only be set if Spec.PerDeviceNodeSelection is set to true.
- // At most one of NodeName, NodeSelector and AllNodes can be set.
- //
- // +optional
- // +oneOf=DeviceNodeSelection
- // +featureGate=DRAPartitionableDevices
- optional .k8s.io.api.core.v1.NodeSelector nodeSelector = 5;
-
- // AllNodes indicates that all nodes have access to the device.
- //
- // Must only be set if Spec.PerDeviceNodeSelection is set to true.
- // At most one of NodeName, NodeSelector and AllNodes can be set.
- //
- // +optional
- // +oneOf=DeviceNodeSelection
- // +featureGate=DRAPartitionableDevices
- optional bool allNodes = 6;
-
- // If specified, these are the driver-defined taints.
- //
- // The maximum number of taints is 4.
- //
- // This is an alpha field and requires enabling the DRADeviceTaints
- // feature gate.
- //
- // +optional
- // +listType=atomic
- // +featureGate=DRADeviceTaints
- repeated DeviceTaint taints = 7;
-}
-
// CELDeviceSelector contains a CEL expression for selecting a device.
message CELDeviceSelector {
// Expression is a CEL expression which evaluates a single device. It
@@ -230,503 +85,6 @@ message CELDeviceSelector {
optional string expression = 1;
}
-// Counter describes a quantity associated with a device.
-message Counter {
- // Value defines how much of a certain device counter is available.
- //
- // +required
- optional .k8s.io.apimachinery.pkg.api.resource.Quantity value = 1;
-}
-
-// CounterSet defines a named set of counters
-// that are available to be used by devices defined in the
-// ResourceSlice.
-//
-// The counters are not allocatable by themselves, but
-// can be referenced by devices. When a device is allocated,
-// the portion of counters it uses will no longer be available for use
-// by other devices.
-message CounterSet {
- // CounterSet is the name of the set from which the
- // counters defined will be consumed.
- //
- // +required
- optional string name = 1;
-
- // Counters defines the counters that will be consumed by the device.
- // The name of each counter must be unique in that set and must be a DNS label.
- //
- // To ensure this uniqueness, capacities defined by the vendor
- // must be listed without the driver name as domain prefix in
- // their name. All others must be listed with their domain prefix.
- //
- // The maximum number of counters is 32.
- //
- // +required
- map<string, Counter> counters = 2;
-}
-
-// Device represents one individual hardware instance that can be selected based
-// on its attributes. Besides the name, exactly one field must be set.
-message Device {
- // Name is unique identifier among all devices managed by
- // the driver in the pool. It must be a DNS label.
- //
- // +required
- optional string name = 1;
-
- // Basic defines one device instance.
- //
- // +optional
- // +oneOf=deviceType
- optional BasicDevice basic = 2;
-}
-
-// DeviceAllocationConfiguration gets embedded in an AllocationResult.
-message DeviceAllocationConfiguration {
- // Source records whether the configuration comes from a class and thus
- // is not something that a normal user would have been able to set
- // or from a claim.
- //
- // +required
- optional string source = 1;
-
- // Requests lists the names of requests where the configuration applies.
- // If empty, it applies to all requests.
- //
- // References to subrequests must include the name of the main request
- // and may include the subrequest using the format <main request>[/<subrequest>]. If just
- // the main request is given, the configuration applies to all subrequests.
- //
- // +optional
- // +listType=atomic
- repeated string requests = 2;
-
- optional DeviceConfiguration deviceConfiguration = 3;
-}
-
-// DeviceAllocationResult is the result of allocating devices.
-message DeviceAllocationResult {
- // Results lists all allocated devices.
- //
- // +optional
- // +listType=atomic
- repeated DeviceRequestAllocationResult results = 1;
-
- // This field is a combination of all the claim and class configuration parameters.
- // Drivers can distinguish between those based on a flag.
- //
- // This includes configuration parameters for drivers which have no allocated
- // devices in the result because it is up to the drivers which configuration
- // parameters they support. They can silently ignore unknown configuration
- // parameters.
- //
- // +optional
- // +listType=atomic
- repeated DeviceAllocationConfiguration config = 2;
-}
-
-// DeviceAttribute must have exactly one field set.
-message DeviceAttribute {
- // IntValue is a number.
- //
- // +optional
- // +oneOf=ValueType
- optional int64 int = 2;
-
- // BoolValue is a true/false value.
- //
- // +optional
- // +oneOf=ValueType
- optional bool bool = 3;
-
- // StringValue is a string. Must not be longer than 64 characters.
- //
- // +optional
- // +oneOf=ValueType
- optional string string = 4;
-
- // VersionValue is a semantic version according to semver.org spec 2.0.0.
- // Must not be longer than 64 characters.
- //
- // +optional
- // +oneOf=ValueType
- optional string version = 5;
-}
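
For reference, a minimal sketch of how the Device and DeviceAttribute types above are populated through the generated Go API. The API group/version (k8s.io/api/resource/v1beta1 from k8s.io/api v0.33) and the attribute names are assumptions for illustration, not something this diff establishes.

```go
package main

import (
	"fmt"

	resourceapi "k8s.io/api/resource/v1beta1" // assumed version; adjust to the vendored package
)

// ptr is a small helper for the pointer-valued attribute fields.
func ptr[T any](v T) *T { return &v }

func main() {
	// One device entry as a driver would publish it in a ResourceSlice.
	// Besides Name, exactly one field may be set; here it is Basic.
	dev := resourceapi.Device{
		Name: "gpu-0", // must be a DNS label
		Basic: &resourceapi.BasicDevice{
			Attributes: map[resourceapi.QualifiedName]resourceapi.DeviceAttribute{
				// Each attribute must set exactly one value field.
				"dra.example.com/model":         {StringValue: ptr("a100")},   // hypothetical attribute
				"dra.example.com/driverVersion": {VersionValue: ptr("1.2.3")}, // semver string
				"dra.example.com/numa":          {IntValue: ptr[int64](0)},
			},
		},
	}
	fmt.Println(dev.Name)
}
```
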
-
-// DeviceClaim defines how to request devices with a ResourceClaim.
-message DeviceClaim {
- // Requests represent individual requests for distinct devices which
- // must all be satisfied. If empty, nothing needs to be allocated.
- //
- // +optional
- // +listType=atomic
- repeated DeviceRequest requests = 1;
-
- // These constraints must be satisfied by the set of devices that get
- // allocated for the claim.
- //
- // +optional
- // +listType=atomic
- repeated DeviceConstraint constraints = 2;
-
- // This field holds configuration for multiple potential drivers which
- // could satisfy requests in this claim. It is ignored while allocating
- // the claim.
- //
- // +optional
- // +listType=atomic
- repeated DeviceClaimConfiguration config = 3;
-}
-
-// DeviceClaimConfiguration is used for configuration parameters in DeviceClaim.
-message DeviceClaimConfiguration {
- // Requests lists the names of requests where the configuration applies.
- // If empty, it applies to all requests.
- //
- // References to subrequests must include the name of the main request
- // and may include the subrequest using the format <main request>[/<subrequest>]. If just
- // the main request is given, the configuration applies to all subrequests.
- //
- // +optional
- // +listType=atomic
- repeated string requests = 1;
-
- optional DeviceConfiguration deviceConfiguration = 2;
-}
-
-// DeviceClass is a vendor- or admin-provided resource that contains
-// device configuration and selectors. It can be referenced in
-// the device requests of a claim to apply these presets.
-// Cluster scoped.
-//
-// This is an alpha type and requires enabling the DynamicResourceAllocation
-// feature gate.
-message DeviceClass {
- // Standard object metadata
- // +optional
- optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
-
- // Spec defines what can be allocated and how to configure it.
- //
- // This is mutable. Consumers have to be prepared for classes changing
- // at any time, either because they get updated or replaced. Claim
- // allocations are done once based on whatever was set in classes at
- // the time of allocation.
- //
- // Changing the spec automatically increments the metadata.generation number.
- optional DeviceClassSpec spec = 2;
-}
-
-// DeviceClassConfiguration is used in DeviceClass.
-message DeviceClassConfiguration {
- optional DeviceConfiguration deviceConfiguration = 1;
-}
-
-// DeviceClassList is a collection of classes.
-message DeviceClassList {
- // Standard list metadata
- // +optional
- optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
-
- // Items is the list of resource classes.
- repeated DeviceClass items = 2;
-}
-
-// DeviceClassSpec is used in a [DeviceClass] to define what can be allocated
-// and how to configure it.
-message DeviceClassSpec {
- // Each selector must be satisfied by a device which is claimed via this class.
- //
- // +optional
- // +listType=atomic
- repeated DeviceSelector selectors = 1;
-
- // Config defines configuration parameters that apply to each device that is claimed via this class.
- // Some classes may potentially be satisfied by multiple drivers, so each instance of a vendor
- // configuration applies to exactly one driver.
- //
- // They are passed to the driver, but are not considered while allocating the claim.
- //
- // +optional
- // +listType=atomic
- repeated DeviceClassConfiguration config = 2;
-}
-
-// DeviceConfiguration must have exactly one field set. It gets embedded
-// inline in some other structs which have other fields, so field names must
-// not conflict with those.
-message DeviceConfiguration {
- // Opaque provides driver-specific configuration parameters.
- //
- // +optional
- // +oneOf=ConfigurationType
- optional OpaqueDeviceConfiguration opaque = 1;
-}
-
-// DeviceConstraint must have exactly one field set besides Requests.
-message DeviceConstraint {
- // Requests is a list of the one or more requests in this claim which
- // must co-satisfy this constraint. If a request is fulfilled by
- // multiple devices, then all of the devices must satisfy the
- // constraint. If this is not specified, this constraint applies to all
- // requests in this claim.
- //
- // References to subrequests must include the name of the main request
- // and may include the subrequest using the format <main request>[/<subrequest>]. If just
- // the main request is given, the constraint applies to all subrequests.
- //
- // +optional
- // +listType=atomic
- repeated string requests = 1;
-
- // MatchAttribute requires that all devices in question have this
- // attribute and that its type and value are the same across those
- // devices.
- //
- // For example, if you specified "dra.example.com/numa" (a hypothetical example!),
- // then only devices in the same NUMA node will be chosen. A device which
- // does not have that attribute will not be chosen. All devices should
- // use a value of the same type for this attribute because that is part of
- // its specification, but if one device doesn't, then it also will not be
- // chosen.
- //
- // Must include the domain qualifier.
- //
- // +optional
- // +oneOf=ConstraintType
- optional string matchAttribute = 2;
-}
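
The matchAttribute behaviour described above is easiest to see with a claim that holds two requests tied together by one constraint. A minimal sketch using the generated Go types; the API version (k8s.io/api/resource/v1beta1), the class names, and the "numa" attribute are assumptions for illustration.

```go
package main

import (
	resourceapi "k8s.io/api/resource/v1beta1" // assumed version; adjust to the vendored package
)

func main() {
	numa := resourceapi.FullyQualifiedName("dra.example.com/numa") // hypothetical attribute

	claim := resourceapi.DeviceClaim{
		Requests: []resourceapi.DeviceRequest{
			{Name: "gpu", DeviceClassName: "example-gpu"}, // class names are assumptions
			{Name: "nic", DeviceClassName: "example-nic"},
		},
		// Both requests must be satisfied by devices that report the same
		// value for the "numa" attribute, i.e. devices in one NUMA node.
		Constraints: []resourceapi.DeviceConstraint{
			{
				Requests:       []string{"gpu", "nic"},
				MatchAttribute: &numa,
			},
		},
	}
	_ = claim
}
```
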
-
-// DeviceCounterConsumption defines a set of counters that
-// a device will consume from a CounterSet.
-message DeviceCounterConsumption {
- // CounterSet defines the set from which the
- // counters defined will be consumed.
- //
- // +required
- optional string counterSet = 1;
-
- // Counters defines the counters that will be consumed by
- // the device.
- //
- // The maximum number of counters in a device is 32.
- // In addition, the maximum number of all counters
- // in all devices is 1024 (for example, 64 devices with
- // 16 counters each).
- //
- // +required
- map<string, Counter> counters = 2;
-}
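
Counters are easiest to read as a shared budget: a CounterSet publishes the totals, and each device lists what it draws from that budget when allocated. A minimal sketch under the same assumptions as above (k8s.io/api/resource/v1beta1 types; the names and quantities are made up).

```go
package main

import (
	resourceapi "k8s.io/api/resource/v1beta1" // assumed version; adjust to the vendored package
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// A shared pool of counters published in a ResourceSlice ...
	counterSet := resourceapi.CounterSet{
		Name: "gpu-0-partitions", // hypothetical set name
		Counters: map[string]resourceapi.Counter{
			"memory": {Value: resource.MustParse("40Gi")},
		},
	}

	// ... and one device that consumes part of that pool when allocated,
	// leaving the remainder for other devices in the slice.
	consumption := resourceapi.DeviceCounterConsumption{
		CounterSet: counterSet.Name,
		Counters: map[string]resourceapi.Counter{
			"memory": {Value: resource.MustParse("10Gi")},
		},
	}
	_ = consumption
}
```
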
-
-// DeviceRequest is a request for devices required for a claim.
-// This is typically a request for a single resource like a device, but can
-// also ask for several identical devices.
-message DeviceRequest {
- // Name can be used to reference this request in a pod.spec.containers[].resources.claims
- // entry and in a constraint of the claim.
- //
- // Must be a DNS label.
- //
- // +required
- optional string name = 1;
-
- // DeviceClassName references a specific DeviceClass, which can define
- // additional configuration and selectors to be inherited by this
- // request.
- //
- // A class is required if no subrequests are specified in the
- // firstAvailable list and no class can be set if subrequests
- // are specified in the firstAvailable list.
- // Which classes are available depends on the cluster.
- //
- // Administrators may use this to restrict which devices may get
- // requested by only installing classes with selectors for permitted
- // devices. If users are free to request anything without restrictions,
- // then administrators can create an empty DeviceClass for users
- // to reference.
- //
- // +optional
- // +oneOf=deviceRequestType
- optional string deviceClassName = 2;
-
- // Selectors define criteria which must be satisfied by a specific
- // device in order for that device to be considered for this
- // request. All selectors must be satisfied for a device to be
- // considered.
- //
- // This field can only be set when deviceClassName is set and no subrequests
- // are specified in the firstAvailable list.
- //
- // +optional
- // +listType=atomic
- repeated DeviceSelector selectors = 3;
-
- // AllocationMode and its related fields define how devices are allocated
- // to satisfy this request. Supported values are:
- //
- // - ExactCount: This request is for a specific number of devices.
- // This is the default. The exact number is provided in the
- // count field.
- //
- // - All: This request is for all of the matching devices in a pool.
- // At least one device must exist on the node for the allocation to succeed.
- // Allocation will fail if some devices are already allocated,
- // unless adminAccess is requested.
- //
- // If AllocationMode is not specified, the default mode is ExactCount. If
- // the mode is ExactCount and count is not specified, the default count is
- // one. Any other requests must specify this field.
- //
- // This field can only be set when deviceClassName is set and no subrequests
- // are specified in the firstAvailable list.
- //
- // More modes may get added in the future. Clients must refuse to handle
- // requests with unknown modes.
- //
- // +optional
- optional string allocationMode = 4;
-
- // Count is used only when the count mode is "ExactCount". Must be greater than zero.
- // If AllocationMode is ExactCount and this field is not specified, the default is one.
- //
- // This field can only be set when deviceClassName is set and no subrequests
- // are specified in the firstAvailable list.
- //
- // +optional
- // +oneOf=AllocationMode
- optional int64 count = 5;
-
- // AdminAccess indicates that this is a claim for administrative access
- // to the device(s). Claims with AdminAccess are expected to be used for
- // monitoring or other management services for a device. They ignore
- // all ordinary claims to the device with respect to access modes and
- // any resource allocations.
- //
- // This field can only be set when deviceClassName is set and no subrequests
- // are specified in the firstAvailable list.
- //
- // This is an alpha field and requires enabling the DRAAdminAccess
- // feature gate. Admin access is disabled if this field is unset or
- // set to false, otherwise it is enabled.
- //
- // +optional
- // +featureGate=DRAAdminAccess
- optional bool adminAccess = 6;
-
- // FirstAvailable contains subrequests, of which exactly one will be
- // satisfied by the scheduler to satisfy this request. It tries to
- // satisfy them in the order in which they are listed here. So if
- // there are two entries in the list, the scheduler will only check
- // the second one if it determines that the first one cannot be used.
- //
- // This field may only be set in the entries of DeviceClaim.Requests.
- //
- // DRA does not yet implement scoring, so the scheduler will
- // select the first set of devices that satisfies all the
- // requests in the claim. And if the requirements can
- // be satisfied on more than one node, other scheduling features
- // will determine which node is chosen. This means that the set of
- // devices allocated to a claim might not be the optimal set
- // available to the cluster. Scoring will be implemented later.
- //
- // +optional
- // +oneOf=deviceRequestType
- // +listType=atomic
- // +featureGate=DRAPrioritizedList
- repeated DeviceSubRequest firstAvailable = 7;
-
- // If specified, the request's tolerations.
- //
- // Tolerations for NoSchedule are required to allocate a
- // device which has a taint with that effect. The same applies
- // to NoExecute.
- //
- // In addition, should any of the allocated devices get tainted
- // with NoExecute after allocation and that effect is not tolerated,
- // then all pods consuming the ResourceClaim get deleted to evict
- // them. The scheduler will not let new pods reserve the claim while
- // it has these tainted devices. Once all pods are evicted, the
- // claim will get deallocated.
- //
- // The maximum number of tolerations is 16.
- //
- // This field can only be set when deviceClassName is set and no subrequests
- // are specified in the firstAvailable list.
- //
- // This is an alpha field and requires enabling the DRADeviceTaints
- // feature gate.
- //
- // +optional
- // +listType=atomic
- // +featureGate=DRADeviceTaints
- repeated DeviceToleration tolerations = 8;
-}
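
Putting the DeviceRequest fields above together: a claim that asks for an exact count of devices from a class, narrowed by a CEL selector. The API version (k8s.io/api/resource/v1beta1), the class name, and the attribute used in the expression are assumptions for illustration only.

```go
package main

import (
	resourceapi "k8s.io/api/resource/v1beta1" // assumed version; adjust to the vendored package
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	claim := resourceapi.ResourceClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "two-gpus", Namespace: "default"},
		Spec: resourceapi.ResourceClaimSpec{
			Devices: resourceapi.DeviceClaim{
				Requests: []resourceapi.DeviceRequest{{
					Name:            "gpus",
					DeviceClassName: "example-gpu", // hypothetical class installed by an admin
					// ExactCount is the default; spelled out here for clarity.
					AllocationMode: resourceapi.DeviceAllocationModeExactCount,
					Count:          2,
					// Narrow the class further with a CEL selector; the
					// attribute name is a made-up example.
					Selectors: []resourceapi.DeviceSelector{{
						CEL: &resourceapi.CELDeviceSelector{
							Expression: `device.attributes["dra.example.com"].model == "a100"`,
						},
					}},
				}},
			},
		},
	}
	_ = claim
}
```
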
-
-// DeviceRequestAllocationResult contains the allocation result for one request.
-message DeviceRequestAllocationResult {
- // Request is the name of the request in the claim which caused this
- // device to be allocated. If it references a subrequest in the
- // firstAvailable list on a DeviceRequest, this field must
- // include both the name of the main request and the subrequest
- // using the format <main request>/<subrequest>.
- //
- // Multiple devices may have been allocated per request.
- //
- // +required
- optional string request = 1;
-
- // Driver specifies the name of the DRA driver whose kubelet
- // plugin should be invoked to process the allocation once the claim is
- // needed on a node.
- //
- // Must be a DNS subdomain and should end with a DNS domain owned by the
- // vendor of the driver.
- //
- // +required
- optional string driver = 2;
-
- // This name together with the driver name and the device name field
- // identify which device was allocated (`<driver name>/<pool name>/<device name>`).
- //
- // Must not be longer than 253 characters and may contain one or more
- // DNS sub-domains separated by slashes.
- //
- // +required
- optional string pool = 3;
-
- // Device references one device instance via its name in the driver's
- // resource pool. It must be a DNS label.
- //
- // +required
- optional string device = 4;
-
- // AdminAccess indicates that this device was allocated for
- // administrative access. See the corresponding request field
- // for a definition of mode.
- //
- // This is an alpha field and requires enabling the DRAAdminAccess
- // feature gate. Admin access is disabled if this field is unset or
- // set to false, otherwise it is enabled.
- //
- // +optional
- // +featureGate=DRAAdminAccess
- optional bool adminAccess = 5;
-
- // A copy of all tolerations specified in the request at the time
- // when the device got allocated.
- //
- // The maximum number of tolerations is 16.
- //
- // This is an alpha field and requires enabling the DRADeviceTaints
- // feature gate.
- //
- // +optional
- // +listType=atomic
- // +featureGate=DRADeviceTaints
- repeated DeviceToleration tolerations = 6;
-}
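
On the consuming side, the driver/pool/device triple recorded in each result is what identifies the allocated instance. A minimal sketch of walking those results from a claim's status, assuming the k8s.io/api/resource/v1beta1 Go types.

```go
package main

import (
	"fmt"

	resourceapi "k8s.io/api/resource/v1beta1" // assumed version; adjust to the vendored package
)

// printAllocatedDevices walks the allocation results recorded in a claim's
// status once the scheduler has allocated it.
func printAllocatedDevices(claim *resourceapi.ResourceClaim) {
	alloc := claim.Status.Allocation
	if alloc == nil {
		return // not allocated yet
	}
	for _, r := range alloc.Devices.Results {
		// Driver, pool and device together identify the instance:
		// <driver name>/<pool name>/<device name>.
		fmt.Printf("request %q -> %s/%s/%s\n", r.Request, r.Driver, r.Pool, r.Device)
	}
}

func main() {}
```
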
-
// DeviceSelector must have exactly one field set.
message DeviceSelector {
// CEL contains a CEL expression for selecting a device.
@@ -736,104 +94,11 @@ message DeviceSelector {
optional CELDeviceSelector cel = 1;
}
-// DeviceSubRequest describes a request for device provided in the
-// claim.spec.devices.requests[].firstAvailable array. Each
-// is typically a request for a single resource like a device, but can
-// also ask for several identical devices.
-//
-// DeviceSubRequest is similar to Request, but doesn't expose the AdminAccess
-// or FirstAvailable fields, as those can only be set on the top-level request.
-// AdminAccess is not supported for requests with a prioritized list, and
-// recursive FirstAvailable fields are not supported.
-message DeviceSubRequest {
- // Name can be used to reference this subrequest in the list of constraints
- // or the list of configurations for the claim. References must use the
- // format <main request>/<subrequest>.
- //
- // Must be a DNS label.
- //
- // +required
- optional string name = 1;
-
- // DeviceClassName references a specific DeviceClass, which can define
- // additional configuration and selectors to be inherited by this
- // subrequest.
- //
- // A class is required. Which classes are available depends on the cluster.
- //
- // Administrators may use this to restrict which devices may get
- // requested by only installing classes with selectors for permitted
- // devices. If users are free to request anything without restrictions,
- // then administrators can create an empty DeviceClass for users
- // to reference.
- //
- // +required
- optional string deviceClassName = 2;
-
- // Selectors define criteria which must be satisfied by a specific
- // device in order for that device to be considered for this
- // request. All selectors must be satisfied for a device to be
- // considered.
- //
- // +optional
- // +listType=atomic
- repeated DeviceSelector selectors = 3;
-
- // AllocationMode and its related fields define how devices are allocated
- // to satisfy this request. Supported values are:
- //
- // - ExactCount: This request is for a specific number of devices.
- // This is the default. The exact number is provided in the
- // count field.
- //
- // - All: This request is for all of the matching devices in a pool.
- // Allocation will fail if some devices are already allocated,
- // unless adminAccess is requested.
- //
- // If AllocationMode is not specified, the default mode is ExactCount. If
- // the mode is ExactCount and count is not specified, the default count is
- // one. Any other requests must specify this field.
- //
- // More modes may get added in the future. Clients must refuse to handle
- // requests with unknown modes.
- //
- // +optional
- optional string allocationMode = 4;
-
- // Count is used only when the count mode is "ExactCount". Must be greater than zero.
- // If AllocationMode is ExactCount and this field is not specified, the default is one.
- //
- // +optional
- // +oneOf=AllocationMode
- optional int64 count = 5;
-
- // If specified, the request's tolerations.
- //
- // Tolerations for NoSchedule are required to allocate a
- // device which has a taint with that effect. The same applies
- // to NoExecute.
- //
- // In addition, should any of the allocated devices get tainted
- // with NoExecute after allocation and that effect is not tolerated,
- // then all pods consuming the ResourceClaim get deleted to evict
- // them. The scheduler will not let new pods reserve the claim while
- // it has these tainted devices. Once all pods are evicted, the
- // claim will get deallocated.
- //
- // The maximum number of tolerations is 16.
- //
- // This is an alpha field and requires enabling the DRADeviceTaints
- // feature gate.
- //
- // +optional
- // +listType=atomic
- // +featureGate=DRADeviceTaints
- repeated DeviceToleration tolerations = 7;
-}
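
The firstAvailable mechanism reads as an ordered list of fallbacks: the scheduler tries each subrequest in turn and picks the first one it can satisfy. A minimal sketch, assuming the k8s.io/api/resource/v1beta1 Go types, hypothetical class names, and the DRAPrioritizedList feature gate being enabled; references in constraints or config would use the "gpu/large" form described above.

```go
package main

import (
	resourceapi "k8s.io/api/resource/v1beta1" // assumed version; adjust to the vendored package
)

func main() {
	// One request satisfied by the first workable alternative:
	// prefer a large GPU, fall back to a small one. Note that
	// DeviceClassName is left unset on the parent request.
	req := resourceapi.DeviceRequest{
		Name: "gpu",
		FirstAvailable: []resourceapi.DeviceSubRequest{
			{
				Name:            "large",
				DeviceClassName: "example-gpu-80gb",
				AllocationMode:  resourceapi.DeviceAllocationModeExactCount,
				Count:           1,
			},
			{
				Name:            "small",
				DeviceClassName: "example-gpu-40gb",
				AllocationMode:  resourceapi.DeviceAllocationModeExactCount,
				Count:           1,
			},
		},
	}
	_ = req
}
```
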
-
// The device this taint is attached to has the "effect" on
// any claim which does not tolerate the taint and, through the claim,
// to pods using the claim.
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
message DeviceTaint {
// The taint key to be applied to a device.
// Must be a label name.
@@ -949,424 +214,3 @@ message DeviceTaintSelector {
repeated DeviceSelector selectors = 5;
}
-// The ResourceClaim this DeviceToleration is attached to tolerates any taint that matches
- // the triple <key,value,effect> using the matching operator <operator>.
-message DeviceToleration {
- // Key is the taint key that the toleration applies to. Empty means match all taint keys.
- // If the key is empty, operator must be Exists; this combination means to match all values and all keys.
- // Must be a label name.
- //
- // +optional
- optional string key = 1;
-
- // Operator represents a key's relationship to the value.
- // Valid operators are Exists and Equal. Defaults to Equal.
- // Exists is equivalent to wildcard for value, so that a ResourceClaim can
- // tolerate all taints of a particular category.
- //
- // +optional
- // +default="Equal"
- optional string operator = 2;
-
- // Value is the taint value the toleration matches to.
- // If the operator is Exists, the value must be empty, otherwise just a regular string.
- // Must be a label value.
- //
- // +optional
- optional string value = 3;
-
- // Effect indicates the taint effect to match. Empty means match all taint effects.
- // When specified, allowed values are NoSchedule and NoExecute.
- //
- // +optional
- optional string effect = 4;
-
- // TolerationSeconds represents the period of time the toleration (which must be
- // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
- // it is not set, which means tolerate the taint forever (do not evict). Zero and
- // negative values will be treated as 0 (evict immediately) by the system.
- // If larger than zero, the time when the pod needs to be evicted is calculated as