diff --git a/go.mod b/go.mod
index 8ab5fd9cc3..392ade166f 100644
--- a/go.mod
+++ b/go.mod
@@ -16,7 +16,8 @@ require (
 	github.com/containers/storage v1.58.0
 	github.com/coreos/go-oidc/v3 v3.14.1
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
-	github.com/distribution/distribution/v3 v3.0.0-20230519140516-983358f8e250
+	github.com/distribution/distribution/v3 v3.0.0
+	github.com/distribution/reference v0.6.0
 	github.com/docker/docker v25.0.6+incompatible
 	github.com/docker/go-units v0.5.0
 	github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7
@@ -33,11 +34,12 @@ require (
 	github.com/moby/sys/sequential v0.5.0
 	github.com/moby/term v0.5.2
 	github.com/opencontainers/go-digest v1.0.0
-	github.com/opencontainers/image-spec v1.1.0
+	github.com/opencontainers/image-spec v1.1.1
 	github.com/openshift/api v0.0.0-20250710082954-674ad74beffc
 	github.com/openshift/build-machinery-go v0.0.0-20250602125535-1b6d00b8c37c
 	github.com/openshift/client-go v0.0.0-20250710075018-396b36f983ee
 	github.com/openshift/library-go v0.0.0-20250711143941-47604345e7ea
+	github.com/openshift/library-go/pkg/image/registryclient/v2 v2.0.0-00010101000000-000000000000
 	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c
 	github.com/prometheus/client_golang v1.22.0
 	github.com/russross/blackfriday v1.6.0
@@ -80,8 +82,7 @@ require (
 	github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
 	github.com/containers/ocicrypt v1.1.9 // indirect
 	github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f // indirect
-	github.com/distribution/reference v0.6.0 // indirect
-	github.com/docker/docker-credential-helpers v0.8.1 // indirect
+	github.com/docker/docker-credential-helpers v0.8.2 // indirect
 	github.com/docker/go-connections v0.5.0 // indirect
 	github.com/docker/go-metrics v0.0.1 // indirect
 	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
@@ -95,7 +96,7 @@ require (
 	github.com/go-git/go-billy/v5 v5.6.1 // indirect
 	github.com/go-git/go-git/v5 v5.13.1 // indirect
 	github.com/go-jose/go-jose/v4 v4.0.5 // indirect
-	github.com/go-logr/logr v1.4.2 // indirect
+	github.com/go-logr/logr v1.4.3 // indirect
 	github.com/go-openapi/analysis v0.21.4 // indirect
 	github.com/go-openapi/errors v0.21.1 // indirect
 	github.com/go-openapi/jsonpointer v0.21.0 // indirect
@@ -125,7 +126,8 @@ require (
 	github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
-	github.com/hashicorp/golang-lru v1.0.2 // indirect
+	github.com/hashicorp/golang-lru/arc/v2 v2.0.7 // indirect
+	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/int128/listener v1.2.0 // indirect
 	github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
@@ -161,9 +163,9 @@ require (
 	github.com/pkg/profile v1.7.0 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/proglottis/gpgme v0.1.3 // indirect
-	github.com/prometheus/client_model v0.6.1 // indirect
-	github.com/prometheus/common v0.62.0 // indirect
-	github.com/prometheus/procfs v0.15.1 // indirect
+	github.com/prometheus/client_model v0.6.2 // indirect
+	github.com/prometheus/common v0.65.0 // indirect
+	github.com/prometheus/procfs v0.17.0 // indirect
 	github.com/robfig/cron v1.2.0 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
@@ -184,9 +186,9 @@ require (
 	go.yaml.in/yaml/v3 v3.0.4 // indirect
 	golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
 	golang.org/x/text v0.27.0 // indirect
-	golang.org/x/time v0.9.0 // indirect
+	golang.org/x/time v0.12.0 // indirect
 	golang.org/x/tools v0.34.0 // indirect
-	google.golang.org/protobuf v1.36.5 // indirect
+	google.golang.org/protobuf v1.36.6 // indirect
 	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 	gopkg.in/go-jose/go-jose.v2 v2.6.3 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
@@ -207,3 +209,5 @@ require (
 )
 
 replace github.com/apcera/gssapi => github.com/openshift/gssapi v0.0.0-20161010215902-5fb4217df13b
+
+replace github.com/openshift/library-go/pkg/image/registryclient/v2 => github.com/tchap/library-go/pkg/image/registryclient/v2 v2.0.0-20250722083015-8716d02a8a94
diff --git a/go.sum b/go.sum
index b03f1ea0be..e0229856ab 100644
--- a/go.sum
+++ b/go.sum
@@ -86,8 +86,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/distribution/distribution/v3 v3.0.0-20230519140516-983358f8e250 h1:B3eXeuNArF23dRKyO8RXRiL7YAJDus6B7bjXeYPt87k=
-github.com/distribution/distribution/v3 v3.0.0-20230519140516-983358f8e250/go.mod h1:t1IxPNGdTGez+YGKyJyQrtSSqisfMIm1hnFhvMPlxtE=
+github.com/distribution/distribution/v3 v3.0.0 h1:q4R8wemdRQDClzoNNStftB2ZAfqOiN6UX90KJc4HjyM=
+github.com/distribution/distribution/v3 v3.0.0/go.mod h1:tRNuFoZsUdyRVegq8xGNeds4KLjwLCRin/tTo6i1DhU=
 github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
 github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
 github.com/docker/cli v25.0.3+incompatible h1:KLeNs7zws74oFuVhgZQ5ONGZiXUUdgsdy6/EsX/6284=
@@ -96,8 +96,8 @@ github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBi
 github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/docker v25.0.6+incompatible h1:5cPwbwriIcsua2REJe8HqQV+6WlWc1byg2QSXzBxBGg=
 github.com/docker/docker v25.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker-credential-helpers v0.8.1 h1:j/eKUktUltBtMzKqmfLB0PAgqYyMHOp5vfsD1807oKo=
-github.com/docker/docker-credential-helpers v0.8.1/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
+github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
+github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
 github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
 github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
 github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
@@ -142,8 +142,8 @@ github.com/go-ldap/ldap/v3 v3.4.3 h1:JCKUtJPIcyOuG7ctGabLKMgIlKnGumD/iGjuWeEruDI
 github.com/go-ldap/ldap/v3 v3.4.3/go.mod h1:7LdHfVt6iIOESVEe3Bs4Jp2sHEKgDeduAhgM1/f9qmo=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
-github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
 github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
 github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY=
@@ -273,8 +273,10 @@ github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY
 github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
 github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
-github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
-github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/golang-lru/arc/v2 v2.0.7 h1:QxkVTxwColcduO+LP7eJO56r2hFiG8zEbfAAzRv52KQ=
+github.com/hashicorp/golang-lru/arc/v2 v2.0.7/go.mod h1:Pe7gBlGdc8clY5LJ0LpJXMt5AmgmWNH1g+oFFVUHOEc=
+github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
+github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
 github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
 github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
@@ -389,8 +391,8 @@ github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
 github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
-github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
+github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
+github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
 github.com/opencontainers/runc v1.1.13 h1:98S2srgG9vw0zWcDpFMn5TRrh8kLxa/5OFUstuUhmRs=
 github.com/opencontainers/runc v1.1.13/go.mod h1:R016aXacfp/gwQBYw2FDGa9m+n6atbLWrYY8hNMT/sA=
 github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww=
@@ -431,17 +433,17 @@ github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/
 github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
-github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
-github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
-github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
+github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
+github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
-github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
+github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
 github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ=
 github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
 github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
@@ -492,6 +494,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
 github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/tchap/library-go/pkg/image/registryclient/v2 v2.0.0-20250722083015-8716d02a8a94 h1:J+xbf7C4qRwU1vo+FS+kb5AZhCM/xtBIF+Fg4MQuyOY=
+github.com/tchap/library-go/pkg/image/registryclient/v2 v2.0.0-20250722083015-8716d02a8a94/go.mod h1:yK17k8WxKQLWp5d8KkcLL0VWrNSFfdC521IPhdmwDIo=
 github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
 github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
 github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
@@ -611,8 +615,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
 golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
-golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
-golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
+golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
 golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
@@ -628,13 +632,13 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
+google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU=
 google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY=
 google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU=
 google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0=
 google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw=
-google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
-google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
+google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/internal/distributionopts/options.go b/internal/distributionopts/options.go
new file mode 100644
index 0000000000..226467236f
--- /dev/null
+++ b/internal/distributionopts/options.go
@@ -0,0 +1,44 @@
+package distributionopts
+
+import (
+	"fmt"
+
+	"github.com/distribution/distribution/v3"
+	"github.com/distribution/reference"
+)
+
+type optionFunc func(interface{}) error
+
+func (f optionFunc) Apply(v interface{}) error {
+	return f(v)
+}
+
+// WithDescriptor returns a BlobCreateOption which provides the expected blob metadata.
+func WithDescriptor(desc distribution.Descriptor) distribution.BlobCreateOption {
+	return optionFunc(func(v interface{}) error {
+		opts, ok := v.(*distribution.CreateOptions)
+		if !ok {
+			return fmt.Errorf("unexpected options type: %T", v)
+		}
+		if opts.Mount.Stat == nil {
+			opts.Mount.Stat = &desc
+		}
+		return nil
+	})
+}
+
+// WithMountFrom returns a BlobCreateOption which designates that the blob should be
+// mounted from the given canonical reference.
+func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption {
+	return optionFunc(func(v interface{}) error {
+		opts, ok := v.(*distribution.CreateOptions)
+		if !ok {
+			return fmt.Errorf("unexpected options type: %T", v)
+		}
+
+		opts.Mount.ShouldMount = true
+		opts.Mount.From = ref
+
+		return nil
+	})
+}
diff --git a/pkg/cli/admin/catalog/mirror.go b/pkg/cli/admin/catalog/mirror.go
index 06106a09f0..36827294e9 100644
--- a/pkg/cli/admin/catalog/mirror.go
+++ b/pkg/cli/admin/catalog/mirror.go
@@ -15,7 +15,6 @@ import (
 
 	"github.com/alicebob/sqlittle"
 	"github.com/distribution/distribution/v3"
-	"github.com/distribution/distribution/v3/manifest/manifestlist"
 	"github.com/joelanford/ignore"
 	"github.com/opencontainers/go-digest"
 	"github.com/spf13/cobra"
@@ -244,14 +243,15 @@ func (o *MirrorCatalogOptions) Complete(cmd *cobra.Command, args []string) error
 	retriever := &info.ImageRetriever{
 		FileDir:         o.FileDir,
 		SecurityOptions: o.SecurityOptions,
-		ManifestListCallback: func(from string, list *manifestlist.DeserializedManifestList, all map[digest.Digest]distribution.Manifest) (map[digest.Digest]distribution.Manifest, error) {
+		ManifestListCallback: func(from string, manifestContainer distribution.Manifest, all map[digest.Digest]distribution.Manifest) (map[digest.Digest]distribution.Manifest, error) {
 			filtered := make(map[digest.Digest]distribution.Manifest)
-			for _, manifest := range list.Manifests {
-				if !o.FilterOptions.Include(&manifest, len(list.Manifests) > 1) {
-					klog.V(5).Infof("Skipping image for %#v from %s", manifest.Platform, from)
+			descriptors := manifestContainer.References()
+			for _, descriptor := range descriptors {
+				if !o.FilterOptions.Include(descriptor, len(descriptors) > 1) {
+					klog.V(5).Infof("Skipping image for %#v from %s", descriptor.Platform, from)
 					continue
 				}
-				filtered[manifest.Digest] = all[manifest.Digest]
+				filtered[descriptor.Digest] = all[descriptor.Digest]
 			}
 			if len(filtered) == 1 {
 				return filtered, nil
@@ -260,8 +260,8 @@ func (o *MirrorCatalogOptions) Complete(cmd *cobra.Command, args []string) error
 			buf := &bytes.Buffer{}
 			w := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0)
 			fmt.Fprintf(w, " OS\tDIGEST\n")
-			for _, manifest := range list.Manifests {
-				fmt.Fprintf(w, " %s\t%s\n", imagemanifest.PlatformSpecString(manifest.Platform), manifest.Digest)
+			for _, descriptor := range descriptors {
+				fmt.Fprintf(w, " %s\t%s\n", imagemanifest.PlatformSpecString(descriptor.Platform), descriptor.Digest)
 			}
 			w.Flush()
 			return nil, fmt.Errorf("the image is a manifest list and contains multiple images - use --index-filter-by-os to select from:\n\n%s\n", buf.String())
diff --git a/pkg/cli/admin/release/info.go b/pkg/cli/admin/release/info.go
index ba7f58810f..8d4fbda532 100644
--- a/pkg/cli/admin/release/info.go
+++ b/pkg/cli/admin/release/info.go
@@ -25,7 +25,6 @@ import (
 	"github.com/MakeNowJust/heredoc"
 	"github.com/blang/semver"
 	"github.com/distribution/distribution/v3"
-	"github.com/distribution/distribution/v3/manifest/manifestlist"
 	units "github.com/docker/go-units"
 	digest "github.com/opencontainers/go-digest"
 	"github.com/spf13/cobra"
@@ -945,14 +944,15 @@ func (o *InfoOptions) retrieveReleaseImages(release *ReleaseInfo, verifier image
 		FileDir:         o.FileDir,
 		SecurityOptions: o.SecurityOptions,
 		ParallelOptions: o.ParallelOptions,
-		ManifestListCallback: func(from string, list *manifestlist.DeserializedManifestList, all map[digest.Digest]distribution.Manifest) (map[digest.Digest]distribution.Manifest, error) {
+		ManifestListCallback: func(from string, manifestContainer distribution.Manifest, all map[digest.Digest]distribution.Manifest) (map[digest.Digest]distribution.Manifest, error) {
 			filtered := make(map[digest.Digest]distribution.Manifest)
-			for _, manifest := range list.Manifests {
-				if !o.FilterOptions.Include(&manifest, len(list.Manifests) > 1) {
-					klog.V(5).Infof("Skipping image for %#v from %s", manifest.Platform, from)
+			descriptors := manifestContainer.References()
+			for _, descriptor := range descriptors {
+				if !o.FilterOptions.Include(descriptor, len(descriptors) > 1) {
+					klog.V(5).Infof("Skipping image for %#v from %s", descriptor.Platform, from)
 					continue
 				}
-				filtered[manifest.Digest] = all[manifest.Digest]
+				filtered[descriptor.Digest] = all[descriptor.Digest]
 			}
 			if len(filtered) == 1 {
 				return filtered, nil
diff --git a/pkg/cli/admin/verifyimagesignature/manifest.go b/pkg/cli/admin/verifyimagesignature/manifest.go
index 2285e3e117..86ce79c4eb 100644
--- a/pkg/cli/admin/verifyimagesignature/manifest.go
+++ b/pkg/cli/admin/verifyimagesignature/manifest.go
@@ -9,7 +9,7 @@ import (
 
 	"k8s.io/client-go/rest"
 
-	"github.com/openshift/library-go/pkg/image/registryclient"
+	"github.com/openshift/library-go/pkg/image/registryclient/v2"
 )
 
 // getImageManifestByIDFromRegistry retrieves the image manifest from the registry using the basic
diff --git a/pkg/cli/image/append/append.go b/pkg/cli/image/append/append.go
index fafae8ee63..c02aaebfda 100644
--- a/pkg/cli/image/append/append.go
+++ b/pkg/cli/image/append/append.go
@@ -15,11 +15,12 @@ import (
 
 	"github.com/distribution/distribution/v3"
 	"github.com/distribution/distribution/v3/manifest/manifestlist"
+	"github.com/distribution/distribution/v3/manifest/ocischema"
 	"github.com/distribution/distribution/v3/manifest/schema2"
-	"github.com/distribution/distribution/v3/reference"
-	"github.com/distribution/distribution/v3/registry/client"
+	"github.com/distribution/reference"
 	units "github.com/docker/go-units"
 	digest "github.com/opencontainers/go-digest"
+	imagespecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 
 	"k8s.io/cli-runtime/pkg/genericiooptions"
 	kcmdutil "k8s.io/kubectl/pkg/cmd/util"
@@ -27,7 +28,8 @@ import (
 
 	"github.com/openshift/api/image/docker10"
 	"github.com/openshift/library-go/pkg/image/dockerv1client"
-	"github.com/openshift/library-go/pkg/image/registryclient"
+	"github.com/openshift/library-go/pkg/image/registryclient/v2"
+	"github.com/openshift/oc/internal/distributionopts"
 	"github.com/openshift/oc/pkg/cli/image/imagesource"
 	imagemanifest "github.com/openshift/oc/pkg/cli/image/manifest"
 	"github.com/openshift/oc/pkg/cli/image/workqueue"
@@ -291,12 +293,17 @@ func (o *AppendImageOptions) Run() error {
 	// if keep-manifest-list is enabled and the image is a manifestlist, add layers only to the filtered sub manifests specified by filter-by-os.
 	// If no filter-by-os is specified, add layers to all sub manifests
 	if o.KeepManifestList {
-		ismanifestlist, err := imagemanifest.IsManifestList(ctx, from.Ref, repo)
+		manifest, _, err := imagemanifest.GetManifestByDockerReference(ctx, from.Ref, repo)
 		if err != nil {
-			return fmt.Errorf("unable to read image %s: %v", from, err)
+			return err
 		}
-		if ismanifestlist {
-			return o.appendManifestList(ctx, createdAt, from, to, repo, toRepo, toManifests, o.FilterOptions.Include)
+
+		switch fromManifest := manifest.(type) {
+		case *manifestlist.DeserializedManifestList:
+			return o.appendManifestList(ctx, createdAt, from, fromManifest, to, repo, toRepo, toManifests, o.FilterOptions.Include)
+
+		case *ocischema.DeserializedImageIndex:
+			return o.appendImageIndex(ctx, createdAt, from, fromManifest, to, repo, toRepo, toManifests, o.FilterOptions.Include)
 		}
 	}
 	srcManifest, manifestLocation, err = imagemanifest.FirstManifest(ctx, from.Ref, repo, o.FilterOptions.Include)
@@ -309,16 +316,22 @@
 	}
 
 func (o *AppendImageOptions) appendManifestList(ctx context.Context, createdAt *time.Time,
-	from *imagesource.TypedImageReference, to imagesource.TypedImageReference,
+	from *imagesource.TypedImageReference, fromManifestList *manifestlist.DeserializedManifestList, to imagesource.TypedImageReference,
 	repo distribution.Repository, toRepo distribution.Repository,
-	toManifests distribution.ManifestService, filterFn imagemanifest.FilterFunc) error {
-	// process manifestlist
-	manifestMap, oldList, _, err := imagemanifest.AllManifests(ctx, from.Ref, repo)
+	toManifests distribution.ManifestService, filterFn imagemanifest.FilterFunc,
+) error {
+	// Get child manifests.
+	manifestMap, _, _, err := imagemanifest.AllManifests(ctx, from.Ref, repo)
+	if err != nil {
+		return err
+	}
+	// create new manifestlist from the old one swapping digest with the new ones
+	oldList := fromManifestList
 	newDescriptors := make([]manifestlist.ManifestDescriptor, 0, len(oldList.Manifests))
 	for _, manifest := range oldList.Manifests {
 		// add layers only to the sub manifests that are specified by the filter
-		if !filterFn(&manifest, len(oldList.Manifests) > 1) {
+		if !filterFn(manifest.Descriptor, len(oldList.Manifests) > 1) {
 			klog.V(5).Infof("Skipping append for image %s for %#v from %s", manifest.Digest, manifest.Platform, from.Ref)
 		} else {
 			// create new ManifestLocation for each digest from the manifestlist
@@ -362,6 +375,66 @@ func (o *AppendImageOptions) appendManifestList(ctx context.Context, createdAt *
 		return nil
 }
+func (o *AppendImageOptions) appendImageIndex(ctx context.Context, createdAt *time.Time,
+	from *imagesource.TypedImageReference, fromImageIndex *ocischema.DeserializedImageIndex, to imagesource.TypedImageReference,
+	repo distribution.Repository, toRepo distribution.Repository,
+	toManifests distribution.ManifestService, filterFn imagemanifest.FilterFunc,
+) error {
+	// Get child manifests.
+	manifestMap, _, _, err := imagemanifest.AllManifests(ctx, from.Ref, repo)
+	if err != nil {
+		return err
+	}
+
+	// create new image index from the old one swapping digest with the new ones
+	oldList := fromImageIndex
+	newDescriptors := make([]imagespecv1.Descriptor, 0, len(oldList.Manifests))
+	for _, manifest := range oldList.Manifests {
+		// add layers only to the sub manifests that are specified by the filter
+		if !filterFn(manifest, len(oldList.Manifests) > 1) {
+			klog.V(5).Infof("Skipping append for image %s for %#v from %s", manifest.Digest, manifest.Platform, from.Ref)
+		} else {
+			// create new ManifestLocation for each digest from the manifestlist
+			// because append function relies on that data
+			dgstManifestLocation := imagemanifest.ManifestLocation{Manifest: manifest.Digest}
+			// to ensure we can read from the Reader multiple times, especially
+			// when dealing with ManifestList, where we copy the same layer contents
+			// (the release-manifests/ directory), we need to wrap it inside TeeReader
+			// which reads copies data to buffer while reading it allowing re-use
+			var buf bytes.Buffer
+			if o.LayerStream != nil {
+				o.LayerStream = io.TeeReader(o.LayerStream, &buf)
+			}
+			err = o.append(ctx, createdAt, from, to, true, repo, manifestMap[manifest.Digest], dgstManifestLocation, toRepo, toManifests)
+			if err != nil {
+				return fmt.Errorf("error appending image %s: %w", manifest.Digest, err)
+			}
+			if buf.Len() > 0 {
+				o.LayerStream = &buf
+			}
+			manifest.Digest = o.ToDigest
+			manifest.Size = o.ToSize
+		}
+		newDescriptors = append(newDescriptors, manifest)
+	}
+	forPush, err := ocischema.FromDescriptors(newDescriptors, oldList.Annotations)
+	if err != nil {
+		return fmt.Errorf("error creating new image index: %#v", err)
+	}
+
+	// push new manifestlist to registry
+	toDigest, err := imagemanifest.PutManifestInCompatibleSchema(ctx, forPush, to.Ref.Tag, toManifests, toRepo.Named(), nil, nil)
+	if err != nil {
+		return fmt.Errorf("unable to push image index: %#v", err)
+	}
+	o.ToDigest = toDigest
+	if !o.DryRun {
+		fmt.Fprintf(o.Out, "Pushed %s to %s\n", toDigest, to)
+	}
+
+	return nil
+}
+
 func (o *AppendImageOptions) append(ctx context.Context, createdAt *time.Time, from *imagesource.TypedImageReference, to imagesource.TypedImageReference,
 	skipTagging bool, repo distribution.Repository, srcManifest distribution.Manifest, manifestLocation imagemanifest.ManifestLocation,
 	toRepo distribution.Repository, toManifests distribution.ManifestService,
@@ -587,13 +660,13 @@ func copyBlob(ctx context.Context, fromBlobs, toBlobs distribution.BlobService,
 	defer r.Close()
 
 	// destination
-	mountOptions := []distribution.BlobCreateOption{WithDescriptor(layer)}
+	mountOptions := []distribution.BlobCreateOption{distributionopts.WithDescriptor(layer)}
 	if mountFrom != nil && !needLayerDigest {
 		source, err := reference.WithDigest(mountFrom, layer.Digest)
 		if err != nil {
 			return distribution.Descriptor{}, "", err
 		}
-		mountOptions = append(mountOptions, client.WithMountFrom(source))
+		mountOptions = append(mountOptions, distributionopts.WithMountFrom(source))
 	}
 	bw, err := toBlobs.Create(ctx, mountOptions...)
 	if err != nil {
@@ -644,26 +717,6 @@ func copyBlob(ctx context.Context, fromBlobs, toBlobs distribution.BlobService,
 	return desc, layerDigest, nil
 }
 
-type optionFunc func(interface{}) error
-
-func (f optionFunc) Apply(v interface{}) error {
-	return f(v)
-}
-
-// WithDescriptor returns a BlobCreateOption which provides the expected blob metadata.
-func WithDescriptor(desc distribution.Descriptor) distribution.BlobCreateOption {
-	return optionFunc(func(v interface{}) error {
-		opts, ok := v.(*distribution.CreateOptions)
-		if !ok {
-			return fmt.Errorf("unexpected options type: %T", v)
-		}
-		if opts.Mount.Stat == nil {
-			opts.Mount.Stat = &desc
-		}
-		return nil
-	})
-}
-
 func appendFileAsLayer(ctx context.Context, name string, layers []distribution.Descriptor, config *dockerv1client.DockerImageConfig,
 	dryRun bool, out io.Writer, blobs distribution.BlobService) ([]distribution.Descriptor, error) {
 	f, err := os.Open(name)
diff --git a/pkg/cli/image/append/dryrun.go b/pkg/cli/image/append/dryrun.go
index 78b8f0725f..bae139c7b8 100644
--- a/pkg/cli/image/append/dryrun.go
+++ b/pkg/cli/image/append/dryrun.go
@@ -10,7 +10,7 @@ import (
 	"github.com/distribution/distribution/v3"
 	digest "github.com/opencontainers/go-digest"
 
-	"github.com/openshift/library-go/pkg/image/registryclient"
+	"github.com/openshift/library-go/pkg/image/registryclient/v2"
 )
 
 // dryRunManifestService emulates a remote registry for dry run behavior
diff --git a/pkg/cli/image/append/scratch.go b/pkg/cli/image/append/scratch.go
index 916d250c48..ce2f9e6e9c 100644
--- a/pkg/cli/image/append/scratch.go
+++ b/pkg/cli/image/append/scratch.go
@@ -7,7 +7,7 @@ import (
 	"net/http"
 
 	"github.com/distribution/distribution/v3"
-	"github.com/distribution/distribution/v3/reference"
+	"github.com/distribution/reference"
 	digest "github.com/opencontainers/go-digest"
 
 	"github.com/openshift/oc/pkg/helpers/image/dockerlayer"
diff --git a/pkg/cli/image/extract/extract.go b/pkg/cli/image/extract/extract.go
index ae4ac5042d..debfed76d5 100644
--- a/pkg/cli/image/extract/extract.go
+++ b/pkg/cli/image/extract/extract.go
@@ -26,7 +26,7 @@ import (
 	"k8s.io/kubectl/pkg/util/templates"
 
 	"github.com/openshift/library-go/pkg/image/dockerv1client"
-	"github.com/openshift/library-go/pkg/image/registryclient"
+	"github.com/openshift/library-go/pkg/image/registryclient/v2"
 	"github.com/openshift/oc/pkg/cli/image/archive"
 	"github.com/openshift/oc/pkg/cli/image/imagesource"
 	imagemanifest "github.com/openshift/oc/pkg/cli/image/manifest"
diff --git a/pkg/cli/image/imagesource/dryrun.go b/pkg/cli/image/imagesource/dryrun.go
index fb7a56650c..a8c8939fde 100644
--- a/pkg/cli/image/imagesource/dryrun.go
+++ b/pkg/cli/image/imagesource/dryrun.go
@@ -1,20 +1,18 @@
 package imagesource
 
 import (
+	"context"
 	"errors"
 	"net/http"
 
 	"github.com/distribution/distribution/v3"
-	"github.com/distribution/distribution/v3/reference"
-	registryclient "github.com/distribution/distribution/v3/registry/client"
+
+	"github.com/openshift/library-go/pkg/image/registryclient/v2"
 )
 
 func NewDryRun(ref TypedImageReference) (distribution.Repository, error) {
-	named, err := reference.WithName(ref.Ref.RepositoryName())
-	if err != nil {
-		return nil, err
-	}
-	return registryclient.NewRepository(named, ref.Ref.RegistryURL().String(), dryRunRoundTripper)
+	return registryclient.NewContext(dryRunRoundTripper, dryRunRoundTripper).
+		Repository(context.Background(), ref.Ref.RegistryURL(), ref.Ref.RepositoryName(), false)
 }
 
 var dryRunRoundTripper = errorRoundTripper{errors.New("dry-run repository is not available")}
diff --git a/pkg/cli/image/imagesource/file.go b/pkg/cli/image/imagesource/file.go
index 2d40a51b8e..b6a6997d49 100644
--- a/pkg/cli/image/imagesource/file.go
+++ b/pkg/cli/image/imagesource/file.go
@@ -16,7 +16,7 @@ import (
 
 	man "github.com/containers/image/v5/manifest"
 	"github.com/distribution/distribution/v3"
-	"github.com/distribution/distribution/v3/reference"
+	"github.com/distribution/reference"
 	"github.com/opencontainers/go-digest"
 	godigest "github.com/opencontainers/go-digest"
 )
diff --git a/pkg/cli/image/imagesource/options.go b/pkg/cli/image/imagesource/options.go
index 2133194bd9..bc7f8f68be 100644
--- a/pkg/cli/image/imagesource/options.go
+++ b/pkg/cli/image/imagesource/options.go
@@ -5,7 +5,7 @@ import (
 	"fmt"
 
 	"github.com/distribution/distribution/v3"
-	"github.com/openshift/library-go/pkg/image/registryclient"
+	"github.com/openshift/library-go/pkg/image/registryclient/v2"
 	"k8s.io/klog/v2"
 )
 
diff --git a/pkg/cli/image/imagesource/s3.go b/pkg/cli/image/imagesource/s3.go
index da8356d548..dafdabe9e4 100644
--- a/pkg/cli/image/imagesource/s3.go
+++ b/pkg/cli/image/imagesource/s3.go
@@ -22,10 +22,11 @@ import (
 	"github.com/aws/aws-sdk-go/service/s3/s3manager"
 
 	"github.com/distribution/distribution/v3"
-	"github.com/distribution/distribution/v3/reference"
-	"github.com/distribution/distribution/v3/registry/client/auth"
-	"github.com/distribution/distribution/v3/registry/client/transport"
+	"github.com/distribution/reference"
 	godigest "github.com/opencontainers/go-digest"
+
+	"github.com/openshift/library-go/pkg/image/registryclient/v2/auth"
+	"github.com/openshift/library-go/pkg/image/registryclient/v2/transport"
 )
 
 type s3Driver struct {
@@ -91,7 +92,9 @@ func (d *s3Driver) newObject(server *url.URL, region string, insecure bool, secu
 
 	if d.UserAgent != "" {
 		awsConfig.WithHTTPClient(&http.Client{
-			Transport: transport.NewTransport(http.DefaultTransport, transport.NewHeaderRequestModifier(http.Header{http.CanonicalHeaderKey("User-Agent"): []string{d.UserAgent}})),
+			Transport: transport.NewTransport(http.DefaultTransport, transport.NewHeaderRequestModifier(http.Header{
+				http.CanonicalHeaderKey("user-agent"): []string{d.UserAgent},
+			})),
 		})
 	}
 	s, err := session.NewSession(awsConfig)
diff --git a/pkg/cli/image/info/info.go b/pkg/cli/image/info/info.go
index 1aa3370b87..d69816b67a 100644
--- a/pkg/cli/image/info/info.go
+++ b/pkg/cli/image/info/info.go
@@ -13,7 +13,6 @@ import (
 	"time"
 
 	"github.com/distribution/distribution/v3"
-	"github.com/distribution/distribution/v3/manifest/manifestlist"
 	units "github.com/docker/go-units"
 	digest "github.com/opencontainers/go-digest"
 	"github.com/spf13/cobra"
@@ -26,7 +25,7 @@ import (
 	"k8s.io/kubectl/pkg/util/templates"
 
 	"github.com/openshift/library-go/pkg/image/dockerv1client"
-	"github.com/openshift/library-go/pkg/image/registryclient"
+	"github.com/openshift/library-go/pkg/image/registryclient/v2"
 	"github.com/openshift/oc/pkg/cli/image/imagesource"
 	imagemanifest "github.com/openshift/oc/pkg/cli/image/manifest"
 	"github.com/openshift/oc/pkg/cli/image/strategy"
@@ -151,14 +150,15 @@ func (o *InfoOptions) Run() error {
 	retriever := &ImageRetriever{
 		FileDir:         o.FileDir,
 		SecurityOptions: o.SecurityOptions,
-		ManifestListCallback: func(from string, list *manifestlist.DeserializedManifestList, all map[digest.Digest]distribution.Manifest) (map[digest.Digest]distribution.Manifest, error) {
+		ManifestListCallback: func(from string, manifestContainer distribution.Manifest, all map[digest.Digest]distribution.Manifest) (map[digest.Digest]distribution.Manifest, error) {
 			filtered := make(map[digest.Digest]distribution.Manifest)
-			for _, manifest := range list.Manifests {
-				if !o.FilterOptions.Include(&manifest, len(list.Manifests) > 1) {
-					klog.V(5).Infof("Skipping image for %#v from %s", manifest.Platform, from)
+			descriptors := manifestContainer.References()
+			for _, descriptor := range descriptors {
+				if !o.FilterOptions.Include(descriptor, len(descriptors) > 1) {
+					klog.V(5).Infof("Skipping image for %#v from %s", descriptor.Platform, from)
 					continue
 				}
-				filtered[manifest.Digest] = all[manifest.Digest]
+				filtered[descriptor.Digest] = all[descriptor.Digest]
 			}
 			if len(filtered) == 1 {
 				return filtered, nil
@@ -171,11 +171,11 @@ func (o *InfoOptions) Run() error {
 			buf := &bytes.Buffer{}
 			w := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0)
 			fmt.Fprintf(w, " OS\tDIGEST\n")
-			for _, manifest := range list.Manifests {
-				fmt.Fprintf(w, " %s\t%s\n", imagemanifest.PlatformSpecString(manifest.Platform), manifest.Digest)
+			for _, descriptor := range descriptors {
+				fmt.Fprintf(w, " %s\t%s\n", imagemanifest.PlatformSpecString(descriptor.Platform), descriptor.Digest)
 			}
 			w.Flush()
-			return nil, fmt.Errorf("the image is a manifest list and contains multiple images - use --filter-by-os to select from:\n\n%s\n", buf.String())
+			return nil, fmt.Errorf("the image is a manifest list and contains multiple images - use --index-filter-by-os to select from:\n\n%s\n", buf.String())
 		},
 
 		ImageMetadataCallback: func(from string, i *Image, err error) error {
@@ -377,11 +377,14 @@ type ImageRetriever struct {
 	// MaxPerRegistry is set higher than 1. If err is passed image is nil. If an error is returned
 	// execution will stop.
 	ImageMetadataCallback func(from string, image *Image, err error) error
-	// ManifestListCallback, if specified, is invoked if the root image is a manifest list. If an
-	// error returned processing stops. If zero manifests are returned the next item is rendered
+	// ManifestListCallback, if specified, is invoked if the root image is a manifest container,
+	// i.e. a manifest list or an image index. The callback must use type assertions if necessary,
+	// the possible types being *manifestlist.DeserializedManifestList or *ocischema.DeserializedImageIndex.
+	//
+	// If an error is returned, processing stops. If zero manifests are returned the next item is rendered
 	// and no ImageMetadataCallback calls occur. If more than one manifest is returned
 	// ImageMetadataCallback will be invoked once for each item.
-	ManifestListCallback func(from string, list *manifestlist.DeserializedManifestList, all map[digest.Digest]distribution.Manifest) (map[digest.Digest]distribution.Manifest, error)
+	ManifestListCallback func(from string, manifestContainer distribution.Manifest, all map[digest.Digest]distribution.Manifest) (map[digest.Digest]distribution.Manifest, error)
 }
 
 // Image returns a single image matching ref.
@@ -433,7 +436,7 @@ func (o *ImageRetriever) Images(ctx context.Context, refs map[string]imagesource
 			return callbackFn(name, nil, fmt.Errorf("unable to connect to image repository %s: %v", from, err))
 		}
 
-		allManifests, manifestList, listDigest, err := imagemanifest.AllManifests(ctx, from.Ref, repo)
+		allManifests, manifest, manifestDigest, err := imagemanifest.AllManifests(ctx, from.Ref, repo)
 		if err != nil {
 			if imagemanifest.IsImageForbidden(err) {
 				msg := fmt.Sprintf("image %q does not exist or you don't have permission to access the repository", from)
@@ -446,8 +449,8 @@ func (o *ImageRetriever) Images(ctx context.Context, refs map[string]imagesource
 			return callbackFn(name, nil, fmt.Errorf("unable to read image %s: %v", from, err))
 		}
 
-		if o.ManifestListCallback != nil && manifestList != nil {
-			allManifests, err = o.ManifestListCallback(name, manifestList, allManifests)
+		if o.ManifestListCallback != nil && imagemanifest.IsManifestContainer(manifest) {
+			allManifests, err = o.ManifestListCallback(name, manifest, allManifests)
 			if err != nil {
 				return err
 			}
@@ -463,7 +466,7 @@ func (o *ImageRetriever) Images(ctx context.Context, refs map[string]imagesource
 				return callbackFn(name, nil, contentErr)
 			}
 
-			imageConfig, layers, manifestErr := imagemanifest.ManifestToImageConfig(ctx, srcManifest, repo.Blobs(ctx), imagemanifest.ManifestLocation{ManifestList: listDigest, Manifest: srcDigest})
+			imageConfig, layers, manifestErr := imagemanifest.ManifestToImageConfig(ctx, srcManifest, repo.Blobs(ctx), imagemanifest.ManifestLocation{ManifestList: manifestDigest, Manifest: srcDigest})
 			mediaType, _, _ := srcManifest.Payload()
 			if err := callbackFn(name, &Image{
 				Name:          from.Ref.Exact(),
@@ -471,7 +474,7 @@ func (o *ImageRetriever) Images(ctx context.Context, refs map[string]imagesource
 				MediaType:     mediaType,
 				Digest:        srcDigest,
 				ContentDigest: contentDigest,
-				ListDigest:    listDigest,
+				ListDigest:    manifestDigest,
 				Config:        imageConfig,
 				Layers:        layers,
 				Manifest:      srcManifest,
diff --git a/pkg/cli/image/manifest/dockercredentials/credential_store_factory.go b/pkg/cli/image/manifest/dockercredentials/credential_store_factory.go
index d3789bf3f5..a2008ee43c 100644
--- a/pkg/cli/image/manifest/dockercredentials/credential_store_factory.go
+++ b/pkg/cli/image/manifest/dockercredentials/credential_store_factory.go
@@ -5,9 +5,10 @@ import (
 	"sync"
 
 	"github.com/containers/image/v5/docker/reference"
-	"github.com/distribution/distribution/v3/registry/client/auth"
 	"github.com/docker/docker/api/types/registry"
-	"github.com/openshift/library-go/pkg/image/registryclient"
+
+	"github.com/openshift/library-go/pkg/image/registryclient/v2"
+	"github.com/openshift/library-go/pkg/image/registryclient/v2/auth"
 )
 
 // NewCredentialStoreFactory returns an entity capable of creating a CredentialStore
diff --git a/pkg/cli/image/manifest/dockercredentials/credential_store_factory_test.go b/pkg/cli/image/manifest/dockercredentials/credential_store_factory_test.go
index 68fa7bdfc0..0b8fb85d9e 100644
--- a/pkg/cli/image/manifest/dockercredentials/credential_store_factory_test.go
+++ b/pkg/cli/image/manifest/dockercredentials/credential_store_factory_test.go
@@ -3,7 +3,7 @@ package dockercredentials
 
 import (
 	imageTypes "github.com/containers/image/v5/types"
-	"github.com/openshift/library-go/pkg/image/registryclient"
+	"github.com/openshift/library-go/pkg/image/registryclient/v2"
 	"testing"
 )
diff --git a/pkg/cli/image/manifest/manifest.go b/pkg/cli/image/manifest/manifest.go
index 4e71cb11c0..50feb55a53 100644
--- a/pkg/cli/image/manifest/manifest.go
+++ b/pkg/cli/image/manifest/manifest.go
@@ -3,6 +3,7 @@ package manifest
 import (
 	"context"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"net/http"
 	"os"
@@ -10,26 +11,26 @@ import (
 	"runtime"
 	"sync"
 
-	"github.com/spf13/pflag"
-
 	"github.com/distribution/distribution/v3"
 	"github.com/distribution/distribution/v3/manifest/manifestlist"
 	"github.com/distribution/distribution/v3/manifest/ocischema"
-	"github.com/distribution/distribution/v3/manifest/schema1"
 	"github.com/distribution/distribution/v3/manifest/schema2"
-	"github.com/distribution/distribution/v3/reference"
 	"github.com/distribution/distribution/v3/registry/api/errcode"
 	v2 "github.com/distribution/distribution/v3/registry/api/v2"
+	"github.com/distribution/reference"
 	"github.com/docker/libtrust"
 	"github.com/opencontainers/go-digest"
+	"github.com/spf13/pflag"
+
 	"k8s.io/client-go/rest"
 	"k8s.io/klog/v2"
 
 	imagespecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/openshift/library-go/pkg/image/dockerv1client"
 	imagereference "github.com/openshift/library-go/pkg/image/reference"
-	"github.com/openshift/library-go/pkg/image/registryclient"
+	"github.com/openshift/library-go/pkg/image/registryclient/v2"
+	"github.com/openshift/library-go/pkg/image/registryclient/v2/manifest/schema1"
 	"github.com/openshift/oc/pkg/cli/image/manifest/dockercredentials"
 	"github.com/openshift/oc/pkg/helpers/image/dockerlayer/add"
 )
@@ -200,18 +201,18 @@ func (o *FilterOptions) IsWildcardFilter() bool {
 
 // Include returns true if the provided manifest should be included, or the first image if the user didn't alter the
 // default selection and there is only one image.
-func (o *FilterOptions) Include(d *manifestlist.ManifestDescriptor, hasMultiple bool) bool {
+func (o *FilterOptions) Include(descriptor imagespecv1.Descriptor, hasMultiple bool) bool {
 	if o.OSFilter == nil {
 		return true
 	}
 	if o.DefaultOSFilter && !hasMultiple {
 		return true
 	}
-	s := PlatformSpecString(d.Platform)
+	s := PlatformSpecString(descriptor.Platform)
 	return o.OSFilter.MatchString(s)
 }
 
-func PlatformSpecString(platform manifestlist.PlatformSpec) string {
+func PlatformSpecString(platform *imagespecv1.Platform) string {
 	if len(platform.Variant) > 0 {
 		return fmt.Sprintf("%s/%s/%s", platform.OS, platform.Architecture, platform.Variant)
 	}
@@ -219,15 +220,15 @@
 
 // IncludeAll returns true if the provided manifest matches the filter, or all if there was no filter.
-func (o *FilterOptions) IncludeAll(d *manifestlist.ManifestDescriptor, hasMultiple bool) bool {
+func (o *FilterOptions) IncludeAll(descriptor imagespecv1.Descriptor, hasMultiple bool) bool {
 	if o.OSFilter == nil {
 		return true
 	}
-	s := PlatformSpecString(d.Platform)
+	s := PlatformSpecString(descriptor.Platform)
 	return o.OSFilter.MatchString(s)
 }
 
-type FilterFunc func(*manifestlist.ManifestDescriptor, bool) bool
+type FilterFunc func(imagespecv1.Descriptor, bool) bool
 
 // PreferManifestList specifically requests a manifest list first
 var PreferManifestList = distribution.WithManifestMediaTypes([]string{
@@ -239,23 +240,7 @@ var PreferManifestList = distribution.WithManifestMediaTypes([]string{
 
 // IsManifestList returns if a given image is a manifestlist or not
 func IsManifestList(ctx context.Context, from imagereference.DockerImageReference, repo distribution.Repository) (bool, error) {
-	var srcDigest digest.Digest
-	if len(from.ID) > 0 {
-		srcDigest = digest.Digest(from.ID)
-	} else if len(from.Tag) > 0 {
-		desc, err := repo.Tags(ctx).Get(ctx, from.Tag)
-		if err != nil {
-			return false, err
-		}
-		srcDigest = desc.Digest
-	} else {
-		return false, fmt.Errorf("no tag or digest specified")
-	}
-	manifests, err := repo.Manifests(ctx)
-	if err != nil {
-		return false, err
-	}
-	srcManifest, err := manifests.Get(ctx, srcDigest, PreferManifestList)
+	srcManifest, _, err := GetManifestByDockerReference(ctx, from, repo)
 	if err != nil {
 		return false, err
 	}
@@ -264,30 +249,58 @@ func IsManifestList(ctx context.Context, from imagereference.DockerImageReferenc
 	return ok, nil
 }
 
-// AllManifests returns all non-list manifests, the list manifest (if any), the digest the from refers to, or an error.
-func AllManifests(ctx context.Context, from imagereference.DockerImageReference, repo distribution.Repository) (map[digest.Digest]distribution.Manifest, *manifestlist.DeserializedManifestList, digest.Digest, error) {
+func GetManifestByDockerReference(ctx context.Context, from imagereference.DockerImageReference, repo distribution.Repository) (distribution.Manifest, digest.Digest, error) {
 	var srcDigest digest.Digest
 	if len(from.ID) > 0 {
 		srcDigest = digest.Digest(from.ID)
 	} else if len(from.Tag) > 0 {
 		desc, err := repo.Tags(ctx).Get(ctx, from.Tag)
 		if err != nil {
-			return nil, nil, "", err
+			return nil, "", fmt.Errorf("unable to get manifest descriptor for tag %s: %w", from, err)
 		}
 		srcDigest = desc.Digest
 	} else {
-		return nil, nil, "", fmt.Errorf("no tag or digest specified")
+		return nil, "", errors.New("no tag or digest specified")
 	}
-	manifests, err := repo.Manifests(ctx)
+
+	manifestService, err := repo.Manifests(ctx)
 	if err != nil {
-		return nil, nil, "", err
+		return nil, "", err
 	}
-	srcManifest, err := manifests.Get(ctx, srcDigest, PreferManifestList)
+
+	manifest, err := manifestService.Get(ctx, srcDigest, PreferManifestList)
+	if err != nil {
+		return nil, "", fmt.Errorf("unable to get manifest for %s: %w", from.String(), err)
+	}
+	return manifest, srcDigest, nil
+}
+
+// AllManifests gets all child manifests for the given reference.
+// For manifest lists and image indexes, the child manifests are gathered.
+// Otherwise, the root manifest is returned in the output manifest map.
+func AllManifests(ctx context.Context, from imagereference.DockerImageReference, repo distribution.Repository) (map[digest.Digest]distribution.Manifest, distribution.Manifest, digest.Digest, error) {
+	srcManifest, srcDigest, err := GetManifestByDockerReference(ctx, from, repo)
 	if err != nil {
 		return nil, nil, "", err
 	}
-	return ManifestsFromList(ctx, srcDigest, srcManifest, manifests, from)
+	allManifests, err := GetManifests(ctx, srcDigest, srcManifest, repo)
+	if err != nil {
+		return nil, nil, "", fmt.Errorf("unable to gather manifests for %s: %w", from.String(), err)
+	}
+	return allManifests, srcManifest, srcDigest, nil
+}
+
+// IsManifestContainer returns true when the given manifest is a manifest list or an image index.
+func IsManifestContainer(manifest distribution.Manifest) bool {
+	switch manifest.(type) {
+	case *manifestlist.DeserializedManifestList:
+		return true
+	case *ocischema.DeserializedImageIndex:
+		return true
+	default:
+		return false
+	}
 }
 
 type ManifestLocation struct {
@@ -315,29 +328,13 @@ func (m ManifestLocation) String() string {
 
 // FirstManifest returns the first manifest at the request location that matches the filter function.
 func FirstManifest(ctx context.Context, from imagereference.DockerImageReference, repo distribution.Repository, filterFn FilterFunc) (distribution.Manifest, ManifestLocation, error) {
-	var srcDigest digest.Digest
-	if len(from.ID) > 0 {
-		srcDigest = digest.Digest(from.ID)
-	} else if len(from.Tag) > 0 {
-		desc, err := repo.Tags(ctx).Get(ctx, from.Tag)
-		if err != nil {
-			return nil, ManifestLocation{}, err
-		}
-		srcDigest = desc.Digest
-	} else {
-		return nil, ManifestLocation{}, fmt.Errorf("no tag or digest specified")
-	}
-	manifests, err := repo.Manifests(ctx)
-	if err != nil {
-		return nil, ManifestLocation{}, err
-	}
-	srcManifest, err := manifests.Get(ctx, srcDigest, PreferManifestList)
+	srcManifest, srcDigest, err := GetManifestByDockerReference(ctx, from, repo)
 	if err != nil {
 		return nil, ManifestLocation{}, err
 	}
 	originalSrcDigest := srcDigest
-	srcChildren, srcManifest, srcDigest, err := ProcessManifestList(ctx, srcDigest, srcManifest, manifests, from, filterFn, false)
+	srcChildren, srcManifest, srcDigest, err := ProcessManifestList(ctx, srcDigest, srcManifest, repo, from, filterFn, false)
 	if err != nil {
 		return nil, ManifestLocation{}, err
 	}
@@ -450,95 +447,157 @@ func ManifestToImageConfig(ctx context.Context, srcManifest distribution.Manifes
 	}
 }
 
-func ProcessManifestList(ctx context.Context, srcDigest digest.Digest, srcManifest distribution.Manifest, manifests distribution.ManifestService, ref imagereference.DockerImageReference, filterFn FilterFunc, keepManifestList bool) (children []distribution.Manifest, manifest distribution.Manifest, digest digest.Digest, err error) {
-	var childManifests []distribution.Manifest
-	switch t := srcManifest.(type) {
-	case *manifestlist.DeserializedManifestList:
-		manifestDigest := srcDigest
-		manifestList := t
+// ProcessManifestList processes the given source manifest, filtering and fetching the child manifests as requested.
+// Both *manifestlist.DeserializedManifestList and *ocischema.DeserializedImageIndex are supported and
+// the matching types are also always returned, i.e. ProcessManifestList won't convert a list to an index and vice versa.
+func ProcessManifestList(
+	ctx context.Context, srcDigest digest.Digest, srcManifest distribution.Manifest,
+	repo distribution.Repository, ref imagereference.DockerImageReference,
+	filterFn FilterFunc, keepManifestList bool,
+) (children []distribution.Manifest, manifest distribution.Manifest, digest digest.Digest, err error) {
 
-		filtered := make([]manifestlist.ManifestDescriptor, 0, len(t.Manifests))
-		for _, manifest := range t.Manifests {
-			if !filterFn(&manifest, len(t.Manifests) > 1) {
-				klog.V(5).Infof("Skipping image %s for %#v from %s", manifest.Digest, manifest.Platform, ref)
+	var filtered bool
+	switch src := srcManifest.(type) {
+	case *manifestlist.DeserializedManifestList:
+		// Filter according to the filter function.
+		filteredManifests := make([]manifestlist.ManifestDescriptor, 0, len(src.Manifests))
+		for _, childManifest := range src.Manifests {
+			if !filterFn(childManifest.Descriptor, len(src.Manifests) > 1) {
+				klog.V(5).Infof("Skipping image %s for %#v from %s", childManifest.Digest, childManifest.Platform, ref)
 				continue
 			}
-			klog.V(5).Infof("Including image %s for %#v from %s", manifest.Digest, manifest.Platform, ref)
-			filtered = append(filtered, manifest)
+			klog.V(5).Infof("Including image %s for %#v from %s", childManifest.Digest, childManifest.Platform, ref)
+			filteredManifests = append(filteredManifests, childManifest)
 		}
-		if len(filtered) == 0 && !keepManifestList {
+		if len(filteredManifests) == 0 && !keepManifestList {
 			return nil, nil, "", nil
 		}
-		// if we're not keeping manifest lists and this one has been filtered, make a new one with
-		// just the filtered platforms.
-		if len(filtered) != len(t.Manifests) && !keepManifestList {
+		// Build the filtered manifest, if necessary.
+		if len(filteredManifests) != len(src.Manifests) && !keepManifestList {
 			var err error
-			t, err = manifestlist.FromDescriptors(filtered)
-			if err != nil {
-				return nil, nil, "", fmt.Errorf("unable to filter source image %s manifest list: %v", ref, err)
-			}
-			_, body, err := t.Payload()
-			if err != nil {
-				return nil, nil, "", fmt.Errorf("unable to filter source image %s manifest list (bad payload): %v", ref, err)
-			}
-			manifestList = t
-			manifestDigest, err = registryclient.ContentDigestForManifest(t, srcDigest.Algorithm())
+			manifest, err = manifestlist.FromDescriptors(filteredManifests)
 			if err != nil {
-				return nil, nil, "", err
+				return nil, nil, "", fmt.Errorf("failed to build filtered manifest list for %s: %w", ref, err)
 			}
-			klog.V(5).Infof("Filtered manifest list to new digest %s:\n%s", manifestDigest, body)
+			filtered = true
+		} else {
+			manifest = srcManifest
 		}
-		for i, manifest := range filtered {
-			childManifest, err := manifests.Get(ctx, manifest.Digest, PreferManifestList)
-			if err != nil {
-				return nil, nil, "", fmt.Errorf("unable to retrieve source image %s manifest #%d from manifest list: %v", ref, i+1, err)
+	case *ocischema.DeserializedImageIndex:
+		// Filter according to the filter function.
+		filteredManifests := make([]imagespecv1.Descriptor, 0, len(src.Manifests))
+		for _, childManifest := range src.Manifests {
+			if !filterFn(childManifest, len(src.Manifests) > 1) {
+				klog.V(5).Infof("Skipping image %s for %#v from %s", childManifest.Digest, childManifest.Platform, ref)
+				continue
 			}
-			childManifests = append(childManifests, childManifest)
+			klog.V(5).Infof("Including image %s for %#v from %s", childManifest.Digest, childManifest.Platform, ref)
+			filteredManifests = append(filteredManifests, childManifest)
+		}
+
+		if len(filteredManifests) == 0 && !keepManifestList {
+			return nil, nil, "", nil
 		}
 
-		switch {
-		case len(childManifests) == 1 && !keepManifestList:
-			// Just return the single platform specific image
-			manifestDigest, err := registryclient.ContentDigestForManifest(childManifests[0], srcDigest.Algorithm())
+		// Build the filtered manifest, if necessary.
+		if len(filteredManifests) != len(src.Manifests) && !keepManifestList {
+			var err error
+			manifest, err = ocischema.FromDescriptors(filteredManifests, src.Annotations)
 			if err != nil {
-				return nil, nil, "", err
+				return nil, nil, "", fmt.Errorf("failed to build filtered image index for %s: %w", ref, err)
 			}
-			klog.V(2).Infof("Chose %s/%s manifest from the manifest list.", t.Manifests[0].Platform.OS, t.Manifests[0].Platform.Architecture)
-			return nil, childManifests[0], manifestDigest, nil
-		default:
-			return childManifests, manifestList, manifestDigest, nil
+			filtered = true
+		} else {
+			manifest = srcManifest
 		}
 	default:
 		return nil, srcManifest, srcDigest, nil
 	}
+
+	// At this point manifest is set to the right return value.
+	// But we may still need to update the associated digest.
+	if filtered {
+		_, body, err := manifest.Payload()
+		if err != nil {
+			return nil, nil, "", fmt.Errorf("failed to get filtered manifest payload for %s: %w", ref, err)
+		}
+
+		digest, err = registryclient.ContentDigestForManifest(manifest, srcDigest.Algorithm())
+		if err != nil {
+			return nil, nil, "", err
+		}
+		klog.V(5).Infof("Filtered manifest to new digest %s:\n%s", digest, body)
+	} else {
+		digest = srcDigest
+	}
+
+	// The only step left is to process children.
+	childMap, err := GetManifests(ctx, digest, manifest, repo)
+	if err != nil {
+		return nil, nil, "", fmt.Errorf("failed to get filtered manifest children for %s: %w", ref, err)
+	}
+	childList := make([]distribution.Manifest, 0, len(childMap))
+	for _, childManifest := range childMap {
+		childList = append(childList, childManifest)
+	}
+
+	if len(childList) == 1 && !keepManifestList {
+		// Just return the single platform specific image
+		manifestDigest, err := registryclient.ContentDigestForManifest(childList[0], srcDigest.Algorithm())
+		if err != nil {
+			return nil, nil, "", err
+		}
+		selected := manifest.References()[0]
+		klog.V(2).Infof("Chose %s/%s manifest from the manifest list.", selected.Platform.OS, selected.Platform.Architecture)
+		return nil, childList[0], manifestDigest, nil
+	}
+
+	return childList, manifest, digest, nil
 }
 
-// ManifestsFromList returns a map of all image manifests for a given manifest. It returns the ManifestList and its digest if
-// srcManifest is a list, or an error.
-func ManifestsFromList(ctx context.Context, srcDigest digest.Digest, srcManifest distribution.Manifest, manifests distribution.ManifestService, ref imagereference.DockerImageReference) (map[digest.Digest]distribution.Manifest, *manifestlist.DeserializedManifestList, digest.Digest, error) { - switch t := srcManifest.(type) { - case *manifestlist.DeserializedManifestList: - allManifests := make(map[digest.Digest]distribution.Manifest) - manifestDigest := srcDigest - manifestList := t +// GetManifests gets all child manifests present in the given manifest using the given ManifestService. +func GetManifests(ctx context.Context, srcDigest digest.Digest, srcManifest distribution.Manifest, repo distribution.Repository) (map[digest.Digest]distribution.Manifest, error) { + manifestService, err := repo.Manifests(ctx) + if err != nil { + return nil, fmt.Errorf("unable to get manifest service: %w", err) + } - for i, manifest := range t.Manifests { - childManifest, err := manifests.Get(ctx, manifest.Digest, PreferManifestList) - if err != nil { - return nil, nil, "", fmt.Errorf("unable to retrieve source image %s manifest #%d from manifest list: %v", ref, i+1, err) - } - allManifests[manifest.Digest] = childManifest + switch srcManifest := srcManifest.(type) { + case *manifestlist.DeserializedManifestList: + allManifests, err := getManifestsForDescriptors(ctx, srcManifest.References(), manifestService) + if err != nil { + return nil, fmt.Errorf("unable to retrieve manifest list child manifest: %v", err) } + return allManifests, nil - return allManifests, manifestList, manifestDigest, nil + case *ocischema.DeserializedImageIndex: + allManifests, err := getManifestsForDescriptors(ctx, srcManifest.Manifests, manifestService) + if err != nil { + return nil, fmt.Errorf("unable to retrieve image index child manifest: %v", err) + } + return allManifests, nil default: - return map[digest.Digest]distribution.Manifest{srcDigest: srcManifest}, nil, "", nil + return map[digest.Digest]distribution.Manifest{srcDigest: srcManifest}, nil + } +} + +func getManifestsForDescriptors( + ctx context.Context, descriptors []imagespecv1.Descriptor, manifests distribution.ManifestService, +) (map[digest.Digest]distribution.Manifest, error) { + gatheredManifests := make(map[digest.Digest]distribution.Manifest) + for i, d := range descriptors { + childManifest, err := manifests.Get(ctx, d.Digest, PreferManifestList) + if err != nil { + return nil, fmt.Errorf("unable to retrieve manifest #%d: %w", i+1, err) + } + gatheredManifests[d.Digest] = childManifest } + return gatheredManifests, nil } // TODO: Remove support for v2 schema in 4.9 diff --git a/pkg/cli/image/mirror/mappings.go b/pkg/cli/image/mirror/mappings.go index 1b3e15b320..6fe81cb7ae 100644 --- a/pkg/cli/image/mirror/mappings.go +++ b/pkg/cli/image/mirror/mappings.go @@ -8,9 +8,9 @@ import ( "strings" "sync" - "github.com/distribution/distribution/v3/registry/client/auth" digest "github.com/opencontainers/go-digest" + "github.com/openshift/library-go/pkg/image/registryclient/v2/auth" "github.com/openshift/oc/pkg/cli/image/imagesource" ) diff --git a/pkg/cli/image/mirror/mirror.go b/pkg/cli/image/mirror/mirror.go index d3c5ca771b..9fb60bad80 100644 --- a/pkg/cli/image/mirror/mirror.go +++ b/pkg/cli/image/mirror/mirror.go @@ -12,10 +12,8 @@ import ( "github.com/distribution/distribution/v3" "github.com/distribution/distribution/v3/manifest/manifestlist" "github.com/distribution/distribution/v3/manifest/ocischema" - 
"github.com/distribution/distribution/v3/manifest/schema1" "github.com/distribution/distribution/v3/manifest/schema2" - "github.com/distribution/distribution/v3/reference" - "github.com/distribution/distribution/v3/registry/client" + "github.com/distribution/reference" units "github.com/docker/go-units" godigest "github.com/opencontainers/go-digest" "github.com/spf13/cobra" @@ -28,7 +26,10 @@ import ( kcmdutil "k8s.io/kubectl/pkg/cmd/util" "k8s.io/kubectl/pkg/util/templates" - "github.com/openshift/library-go/pkg/image/registryclient" + "github.com/openshift/library-go/pkg/image/registryclient/v2" + "github.com/openshift/library-go/pkg/image/registryclient/v2/clienterrors" + "github.com/openshift/library-go/pkg/image/registryclient/v2/manifest/schema1" + "github.com/openshift/oc/internal/distributionopts" "github.com/openshift/oc/pkg/cli/image/imagesource" imagemanifest "github.com/openshift/oc/pkg/cli/image/manifest" "github.com/openshift/oc/pkg/cli/image/workqueue" @@ -526,7 +527,7 @@ func (o *MirrorImageOptions) plan() (*plan, error) { srcDigest := godigest.Digest(srcDigestString) srcManifest, err := manifests.Get(ctx, godigest.Digest(srcDigest), imagemanifest.PreferManifestList) if err != nil { - var unexpectedHTTPResponseError *client.UnexpectedHTTPResponseError + var unexpectedHTTPResponseError *clienterrors.UnexpectedHTTPResponseError if o.SkipMissing && errors.As(err, &unexpectedHTTPResponseError) { if unexpectedHTTPResponseError.StatusCode == 404 { fmt.Fprintf(o.ErrOut, "warning: Image %s does not exist and will not be mirrored\n", err) @@ -540,7 +541,7 @@ func (o *MirrorImageOptions) plan() (*plan, error) { // filter or load manifest list as appropriate originalSrcDigest := srcDigest - srcChildren, srcManifest, srcDigest, err := imagemanifest.ProcessManifestList(ctx, srcDigest, srcManifest, manifests, src.ref.Ref, o.FilterOptions.IncludeAll, o.KeepManifestList) + srcChildren, srcManifest, srcDigest, err := imagemanifest.ProcessManifestList(ctx, srcDigest, srcManifest, srcRepo, src.ref.Ref, o.FilterOptions.IncludeAll, o.KeepManifestList) if err != nil { plan.AddError(retrieverError{src: src.ref, err: err}) return @@ -693,7 +694,7 @@ func copyBlob(ctx context.Context, plan *workPlan, c *repositoryBlobCopy, blob d if err != nil { return fmt.Errorf("unexpected error building named digest: %v", err) } - options = append(options, client.WithMountFrom(blobSource), WithDescriptor(blob)) + options = append(options, distributionopts.WithMountFrom(blobSource), WithDescriptor(blob)) } } @@ -854,9 +855,9 @@ func (s *descriptorBlobSource) Open(ctx context.Context, desc distribution.Descr klog.V(5).Infof("Failed to retrieve blob %s from %s: %v", desc.Digest, url, err) continue } - if !client.SuccessStatus(resp.StatusCode) { + if !clienterrors.SuccessStatus(resp.StatusCode) { resp.Body.Close() - if err := client.HandleErrorResponse(resp); err != nil { + if err := clienterrors.HandleErrorResponse(resp); err != nil { klog.V(5).Infof("Failed to retrieve blob %s from %s: %v", desc.Digest, url, err) continue } diff --git a/pkg/cli/image/strategy/explicit.go b/pkg/cli/image/strategy/explicit.go index 9f2121a277..a6286ddb64 100644 --- a/pkg/cli/image/strategy/explicit.go +++ b/pkg/cli/image/strategy/explicit.go @@ -8,7 +8,7 @@ import ( "k8s.io/klog/v2" "github.com/openshift/library-go/pkg/image/reference" - "github.com/openshift/library-go/pkg/image/registryclient" + "github.com/openshift/library-go/pkg/image/registryclient/v2" ) type explicitICSPStrategy struct { diff --git 
a/pkg/cli/image/strategy/onerror.go b/pkg/cli/image/strategy/onerror.go index 3bd9ffb340..c87e2d2b75 100644 --- a/pkg/cli/image/strategy/onerror.go +++ b/pkg/cli/image/strategy/onerror.go @@ -18,7 +18,7 @@ import ( apicfgv1scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" operatorv1alpha1scheme "github.com/openshift/client-go/operator/clientset/versioned/scheme" "github.com/openshift/library-go/pkg/image/reference" - "github.com/openshift/library-go/pkg/image/registryclient" + "github.com/openshift/library-go/pkg/image/registryclient/v2" utilyaml "k8s.io/apimachinery/pkg/util/yaml" ) diff --git a/pkg/cli/registry/info/info.go b/pkg/cli/registry/info/info.go index a4416d0417..d5e9f0b9ad 100644 --- a/pkg/cli/registry/info/info.go +++ b/pkg/cli/registry/info/info.go @@ -6,7 +6,6 @@ import ( "net/http" "net/url" - "github.com/distribution/distribution/v3/registry/client/transport" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -18,7 +17,8 @@ import ( imageclient "github.com/openshift/client-go/image/clientset/versioned" "github.com/openshift/library-go/pkg/image/reference" - "github.com/openshift/library-go/pkg/image/registryclient" + "github.com/openshift/library-go/pkg/image/registryclient/v2" + "github.com/openshift/library-go/pkg/image/registryclient/v2/transport" ) var ( diff --git a/pkg/cli/registry/login/login.go b/pkg/cli/registry/login/login.go index b24243073a..8a9e0734b3 100644 --- a/pkg/cli/registry/login/login.go +++ b/pkg/cli/registry/login/login.go @@ -26,7 +26,7 @@ import ( imageclient "github.com/openshift/client-go/image/clientset/versioned" "github.com/openshift/library-go/pkg/image/reference" - "github.com/openshift/library-go/pkg/image/registryclient" + "github.com/openshift/library-go/pkg/image/registryclient/v2" "github.com/openshift/oc/pkg/helpers/image" ) diff --git a/pkg/helpers/describe/describer.go b/pkg/helpers/describe/describer.go index 1db4dbd90d..3e3a91f4bb 100644 --- a/pkg/helpers/describe/describer.go +++ b/pkg/helpers/describe/describer.go @@ -2114,7 +2114,6 @@ func describeInsightsDataGathers(idg *configv1alpha2.InsightsDataGather) (string fmt.Fprintf(out, "Name:\t%s\n", idg.Name) fmt.Fprintf(out, "GatherConfig:\t\n") fmt.Fprintf(out, " DataPolicy:\t%s\n", insightsDataPolicyToString(idg.Spec.GatherConfig.DataPolicy)) - return nil }) } diff --git a/pkg/helpers/image/test/util.go b/pkg/helpers/image/test/util.go index 8675bc324c..a5984e498c 100644 --- a/pkg/helpers/image/test/util.go +++ b/pkg/helpers/image/test/util.go @@ -4,7 +4,6 @@ import ( "fmt" "time" - "github.com/distribution/distribution/v3/manifest/schema1" "github.com/distribution/distribution/v3/manifest/schema2" imagespecv1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -19,6 +18,7 @@ import ( buildv1 "github.com/openshift/api/build/v1" dockerv10 "github.com/openshift/api/image/docker10" imagev1 "github.com/openshift/api/image/v1" + "github.com/openshift/library-go/pkg/image/registryclient/v2/manifest/schema1" ) const ( diff --git a/vendor/github.com/distribution/distribution/v3/.gitignore b/vendor/github.com/distribution/distribution/v3/.gitignore index 4cf7888e92..45fc33b19c 100644 --- a/vendor/github.com/distribution/distribution/v3/.gitignore +++ b/vendor/github.com/distribution/distribution/v3/.gitignore @@ -36,3 +36,10 @@ bin/* *.sublime-project *.sublime-workspace .idea/* + +tests/miniodata + +# Docs +**/.hugo_build.lock +docs/resources +docs/public diff --git a/vendor/github.com/distribution/distribution/v3/.golangci.yml 
b/vendor/github.com/distribution/distribution/v3/.golangci.yml index 64a494b448..0f4d6ce0ec 100644 --- a/vendor/github.com/distribution/distribution/v3/.golangci.yml +++ b/vendor/github.com/distribution/distribution/v3/.golangci.yml @@ -6,11 +6,13 @@ linters: - goimports - revive - ineffassign - - vet + - govet - unused - misspell - disable: + - bodyclose + - prealloc - errcheck + - tparallel linters-settings: revive: @@ -20,7 +22,6 @@ linters-settings: - name: unused-parameter disabled: true -run: - deadline: 2m - skip-dirs: +issues: + exclude-dirs: - vendor diff --git a/vendor/github.com/distribution/distribution/v3/.mailmap b/vendor/github.com/distribution/distribution/v3/.mailmap index 0f48321d49..debd4ff4d7 100644 --- a/vendor/github.com/distribution/distribution/v3/.mailmap +++ b/vendor/github.com/distribution/distribution/v3/.mailmap @@ -1,32 +1,213 @@ -Stephen J Day Stephen Day -Stephen J Day Stephen Day -Olivier Gambier Olivier Gambier -Brian Bland Brian Bland -Brian Bland Brian Bland -Josh Hawn Josh Hawn -Richard Scothern Richard -Richard Scothern Richard Scothern -Andrew Meredith Andrew Meredith -harche harche -Jessie Frazelle -Sharif Nassar Sharif Nassar -Sven Dowideit Sven Dowideit -Vincent Giersch Vincent Giersch -davidli davidli -Omer Cohen Omer Cohen -Eric Yang Eric Yang -Nikita Tarasov Nikita -Yu Wang yuwaMSFT2 +Aaron Lehmann +Aaron Lehmann +Akihiro Suda +Akihiro Suda +Akihiro Suda +Alexander Morozov +Alexander Morozov +Anders Ingemann +Andrew Meredith +Andrew Meredith +Andrey Smirnov +Andrii Soldatenko +Andrii Soldatenko +andy-cooper +Ankur Kothiwal +Anthony Ramahay +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Austin Vazquez +Benjamin Schanzel +Brian Bland +Brian Bland +Brian Bland +Chad Faragher +Chun-Hung Hsiao +Cory Snider +CrazyMax +CrazyMax <1951866+crazy-max@users.noreply.github.com> +CrazyMax +Cristian Staretu +Cristian Staretu +Cristian Staretu +Daniel Nephin +Daniel Nephin +David Karlsson +David Karlsson <35727626+dvdksn@users.noreply.github.com> +David Wu +David Wu +Derek McGowan +Derek McGowan +Dimitar Kostadinov +Doug Davis +Doug Davis +Emmanuel Ferdman +Eng Zer Jun +Eric Yang +Eric Yang +Eric Yang +Erica Windisch +Erica Windisch +Flavian Missi +Guillaume J. Charmes +Guillaume J. Charmes +Guillaume J. Charmes +Guillaume J. Charmes +Guillaume J. Charmes +Hayley Swimelar +Ismail Alidzhikov +Jaime Martinez +James Hewitt +Jan-Otto Kröpke +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Joao Fernandes +Joao Fernandes +João Pereira <484633+joaodrp@users.noreply.github.com> +Joffrey F +Joffrey F +Joffrey F +Johan Euphrosine +Johan Euphrosine +John Howard +John Howard +Jose D. Gomez R +Josh Hawn +Josh Hawn +Joyce Brum +Joyce Brum +Justin Cormack +Justin Cormack +Justin Cormack +Kirat Singh +Kirat Singh +Kirat Singh +krynju +Kyle Squizzato +Liang Zheng +Luca Bruno +Luca Bruno +Mahmoud Kandil <47168819+MahmoudKKandil@users.noreply.github.com> +Manish Tomar +Manish Tomar +Maria Bermudez +Maria Bermudez +Markus Thömmes +Matheus Macabu +Mateusz Urbanek +Matt Linville +Matt Linville +Matt Linville +Michael Crosby +Michael Crosby +Michael Crosby +Michael Crosby +Michael Crosby +Michal Minar +Michal Minar Michal Minář +Mike Brown +Mike Brown +Mikel Rychliski +Mikhail f. 
Shiryaev +Milos Gajdos +Milos Gajdos <1392526+milosgajdos@users.noreply.github.com> +Milos Gajdos +Nikita Tarasov +Nikita Tarasov +Oleg Bulatov +Oleg Bulatov +Oleg Gnusarev +Olivier Gambier +Olivier Gambier +Omer Cohen +Omer Cohen +Paul Meyer <49727155+katexochen@users.noreply.github.com> +Per Lundberg +Per Lundberg +Peter Dave Hello +Peter Dave Hello +Phil Estes +Phil Estes +Phil Estes +Pratik +Rafael Fonseca +Richard Scothern +Richard Scothern +Rober Morales-Chaparro +Rober Morales-Chaparro +Robin Ketelbuters +Sebastiaan van Stijn +Sebastiaan van Stijn +Sebastiaan van Stijn +Sebastiaan van Stijn +Sharif Nassar +Sharif Nassar +Solomon Hykes +Solomon Hykes +Solomon Hykes +Solomon Hykes +Solomon Hykes +Stephen Day +Stephen Day +Stephen Day +Steven Kalt +Sven Dowideit +Sven Dowideit +Sylvain DESGRAIS +Tadeusz Dudkiewicz +Thomas Way +Tibor +Tibor Vass +Tibor Vass +Tibor Vass +Vadim Bauer +Victor Barbu +Victor Vieux +Victor Vieux +Victor Vieux +Victor Vieux +Victor Vieux +Victor Vieux +Victor Vieux +Victor Vieux +Victoria Bialas +Victoria Bialas +Vincent Batts +Vincent Batts +Vincent Demeester +Vincent Demeester +Vincent Demeester +Vincent Demeester +Vincent Giersch +Vincent Giersch +vitshev +Wang Yan +Wen-Quan Li +Wen-Quan Li +Wen-Quan Li +Yu Wang Yu Wang Yu Wang (UC) -Olivier Gambier dmp -Olivier Gambier Olivier -Olivier Gambier Olivier -Elsan Li 李楠 elsanli(李楠) -Rui Cao ruicao -Gwendolynne Barr gbarr01 -Haibing Zhou 周海兵 zhouhaibing089 -Feng Honglin tifayuki -Helen Xie Helen-xie -Mike Brown Mike Brown -Manish Tomar Manish Tomar -Sakeven Jiang sakeven +baojiangnan +baojiangnan +erezrokah +goodactive +gotgelf +guoguangwu +harche +harche +icefed +oliver-goetz +tomoya-kawaguchi +xiaoxiangxianzi diff --git a/vendor/github.com/distribution/distribution/v3/AUTHORS b/vendor/github.com/distribution/distribution/v3/AUTHORS new file mode 100644 index 0000000000..360a284529 --- /dev/null +++ b/vendor/github.com/distribution/distribution/v3/AUTHORS @@ -0,0 +1,571 @@ +# This file lists all individuals having contributed content to the repository. +# For how it is generated, see dockerfiles/authors.Dockerfile. 
+ +a-palchikov +Aaron Lehmann +Aaron Schlesinger +Aaron Vinson +Adam Dobrawy +Adam Duke +Adam Enger +Adam Kaplan +Adam Wolfe Gordon +AdamKorcz +Adrian Mouat +Adrian Plata +Adrien Duermael +Ahmet Alp Balkan +Aidan Hobson Sayers +Akihiro Suda +Aleksejs Sinicins +Alex +Alex Chan +Alex Elman +Alex Laties +Alexander Larsson +Alexander Morozov +Alexey Gladkov +Alfonso Acosta +allencloud +Alvin Feng +amitshukla +Amy Lindburg +Anders Ingemann +Andreas Hassing +Andrew Bulford +Andrew Hsu +Andrew Lavery +Andrew Leung +Andrew Lively +Andrew Meredith +Andrew T Nguyen +Andrews Medina +Andrey Kostov +Andrey Smirnov +Andrii Soldatenko +Andy Goldstein +andy-cooper +andyzhangx +Anian Z +Anil Belur +Anis Elleuch +Ankur Kothiwal +Ankush Agarwal +Anne Henmi <41210220+ahh-docker@users.noreply.github.com> +Anthony Ramahay +Anton Tiurin +Antonio Mercado +Antonio Murdaca +Antonio Ojea +Anusha Ragunathan +Arien Holthuizen +Arko Dasgupta +Arnaud Porterie +Arthur Baars +Arthur Gautier +Asuka Suzuki +Austin Vazquez +Avi Miller +Aviral Takkar +Ayose Cazorla +BadZen +baojiangnan +Ben Bodenmiller +Ben De St Paer-Gotch +Ben Emamian +Ben Firshman +Ben Kochie +Ben Manuel +Benjamin Schanzel +Bhavin Gandhi +Bill +bin liu +Bouke van der Bijl +Bracken Dawson +Brandon Mitchell +Brandon Philips +Brett Higgins +Brian Bland +Brian Goff +burnettk +Caleb Spare +Carson A +Cezar Sa Espinola +Chad Faragher +Chaos John +Charles Smith +Cheng Zheng +chlins +Chris Aniszczyk +Chris Dillon +Chris K. Wong +Chris Patterson +Christopher Yeleighton +Christy Perez +Chuanying Du +Chun-Hung Hsiao +Clayton Coleman +Collin Shoop +Corey Quon +Cory Snider +CrazyMax +cressie176 +Cristian Staretu +cui fliter +cuiwei13 +cyli +Daehyeok Mun +Daisuke Fujita +Damien Mathieu +Dan Fredell +Dan Walsh +Daniel Helfand +Daniel Huhn +Daniel Menet +Daniel Mizyrycki +Daniel Nephin +Daniel, Dao Quang Minh +Danila Fominykh +Darren Shepherd +Dave +Dave Trombley +Dave Tucker +David Calavera +David Justice +David Karlsson +David Lawrence +David Luu +David Mackey +David van der Spek +David Verhasselt +David Wu +David Xia +Dawn W Docker +ddelange <14880945+ddelange@users.noreply.github.com> +Dejan Golja +Denis Andrejew +dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> +Derek +Derek McGowan +Deshi Xiao +Dimitar Kostadinov +Diogo Mónica +DJ Enriquez +Djibril Koné +dmp +Don Bowman +Don Kjer +Donald Huang +Doug Davis +drornir +duanhongyi +ducksecops +E. M. Bray +Edgar Lee +Elliot Pahl +elsanli(李楠) +Elton Stoneman +Emmanuel Briney +Emmanuel Ferdman +Eng Zer Jun +Eohyung Lee +erezrokah +Eric Yang +Erica Windisch +Erik Hollensbe +Etki +Eugene Lubarsky +evanebb +eyjhb +eyjhbb@gmail.com +Fabio Berchtold +Fabio Falci +Fabio Huser +farmerworking +fate-grand-order +Felix Bünemann +Felix Yan +Feng Honglin +Fernando Mayo Fernandez +Flavian Missi +Florentin Raud +forkbomber +Frank Chen +Frederick F. Kautz IV +Gabor Nagy +gabriell nascimento +Gaetan +gary schaetz +gbarr01 +Geoffrey Hausheer +ghodsizadeh +Giovanni Toraldo +Gladkov Alexey +Gleb M Borisov +Gleb Schukin +glefloch +Glyn Owen Hanmer <1295698+glynternet@users.noreply.github.com> +goodactive +gotgelf +Grachev Mikhail +Grant Watters +Greg Rebholz +Guillaume J. 
Charmes +Guillaume Rose +guoguangwu +Gábor Lipták +harche +hasheddan +Hayley Swimelar +Helen-xie +Henri Gomez +Honglin Feng +Hu Keping +Hua Wang +HuKeping +Huu Nguyen +Ian Babrou +iasoon +icefed +igayoso +Igor Dolzhikov +Igor Morozov +Ihor Dvoretskyi +Ilion Beyst +Ina Panova +Irene Diez +Ismail Alidzhikov +Jack Baines +Jack Griffin +Jacob Atzen +Jaime Martinez +Jake Moshenko +Jakob Ackermann +Jakub Mikulas +James Findley +James Hewitt +James Lal +Jan-Otto Kröpke +Jason Freidman +Jason Heiss +Javier Palomo Almena +jdolitsky <393494+jdolitsky@users.noreply.github.com> +Jeff Nickoloff +Jeffrey van Gogh +jerae-duffin <83294991+jerae-duffin@users.noreply.github.com> +Jeremy THERIN +Jesse Brown +Jesse Haka +Jessica Frazelle +jhaohai +Jianqing Wang +Jihoon Chung +Jim Galasyn +Joao Fernandes +Joffrey F +Johan Euphrosine +John Howard +John Mulhausen +John Starks +Jon Johnson +Jon Poler +Jonas Hecht +Jonathan Boulle +Jonathan Lee +Jonathan Rudenberg +Jordan Liggitt +Jose D. Gomez R +Josh Chorlton +Josh Dolitsky +Josh Hawn +Josiah Kiehl +Joyce Brum +João Pereira <484633+joaodrp@users.noreply.github.com> +Julien Bordellier <1444415+jstoja@users.noreply.github.com> +Julien Fernandez +Justas Brazauskas +Justin Cormack +Justin I. Nevill +Justin Santa Barbara +kaiwentan +Ke Xu +Keerthan Mala +Kelsey Hightower +Ken Cochrane +Kenneth Lim +Kenny Leung +Kevin Lin +Kevin Robatel +Kira +Kirat Singh +krynju +Kyle Squizzato +Kyle Squizzato +L-Hudson <44844738+L-Hudson@users.noreply.github.com> +Lachlan Cooper +Laura Brehm +Lei Jitang +Lenny Linux +Leonardo Azize Martins +leonstrand +Li Yi +Liam White +Liang Zheng +libo.huang +LingFaKe +Liron Levin +lisong +Littlemoon917 <18084421+Littlemoon917@users.noreply.github.com> +Liu Hua +liuchang0812 +liyongxin +Lloyd Ramey +lostsquirrel +Louis Kottmann +Luca Bruno +Lucas França de Oliveira +Lucas Santos +Luis Lobo Borobia +Luke Carpenter +Ma Shimiao +Mahmoud Kandil <47168819+MahmoudKKandil@users.noreply.github.com> +Makoto Oda +mallchin +Manish Tomar +Marco Hennings +Marcus Martins +Maria Bermudez +Mark Sagi-Kazar +Markus Thömmes +Mary Anthony +Masataka Mizukoshi +Mateusz Urbanek +Matheus Macabu +Matin Rahmanian +MATSUMOTO TAKEAKI +Matt Bentley +Matt Duch +Matt Linville +Matt Moore +Matt Robenolt +Matt Tescher +Matthew Balvanz +Matthew Green +Matthew Riley +Maurice Sotzny +Meaglith Ma +Michael Bonfils +Michael Crosby +Michael Prokop +Michael Vetter +Michal Fojtik +Michal Gebauer +Michal Guerquin +Michal Minar +Mike Brown +Mike Lundy +Mike Truman +Mikel Rychliski +Mikhail f. Shiryaev +Milos Gajdos +Miquel Sabaté +mlmhl <409107750@qq.com> +Monika Katiyar +Morgan Bauer +moxiegirl +mqliang +Muesli +Nan Monnand Deng +Nat Zimmermann +Nathan Sullivan +Naveed Jamil +Neil Wilson +nevermosby +Nghia Tran +Nicolas De Loof +Nikita Tarasov +ning xie +Nishant Totla +Noah Treuhaft +Novak Ivanovski +Nuutti Kotivuori +Nycholas de Oliveira e Oliveira +Oilbeater +Oleg Bulatov +Oleg Gnusarev +olegburov +oliver-goetz +Olivier +Olivier Gambier +Olivier Jacques +ollypom +Omer Cohen +Oscar Caballero +Owen W. 
Taylor +paigehargrave +Parth Mehrotra +Pascal Borreli +Patrick Devine +Patrick Easters +Paul Cacheux +Paul Meyer <49727155+katexochen@users.noreply.github.com> +Pavel Antonov +Paweł Gronowski +Per Lundberg +Peter Choi +Peter Dave Hello +Peter Kokot +Phil Estes +Philip Misiowiec +Pierre-Yves Ritschard +Pieter Scheffers +Pratik +Qiang Huang +Qiao Anran +Radon Rosborough +Rafael Fonseca +Randy Barlow +Raphaël Enrici +Ricardo Maraschini +Richard Scothern +Rick Wieman +Rik Nijessen +Riyaz Faizullabhoy +Rober Morales-Chaparro +Robert Kaussow +Robert Steward +Roberto G. Hashioka +Robin Ketelbuters +Rodolfo Carvalho +ROY +Rui Cao +ruicao +Rusty Conover +Ryan Abrams +Ryan Thomas +sakeven +Sam Alba +Samuel Karp +sangluo +Santiago Torres +Sargun Dhillon +sayboras +Sean Boran +Sean P. Kane +Sebastiaan van Stijn +Sebastien Coavoux +Serge Dubrouski +Sevki Hasirci +Sharif Nassar +Shawn Chen +Shawn Falkner-Horine +Shawnpku +Shengjing Zhu +Shiela M Parker +Shishir Mahajan +Shreyas Karnik +Silvin Lubecki <31478878+silvin-lubecki@users.noreply.github.com> +Simon +Simon Thulbourn +Simone Locci +Smasherr +Solomon Hykes +Sora Morimoto +spacexnice +Spencer Rinehart +srajmane <31947381+srajmane@users.noreply.github.com> +Srini Brahmaroutu +Stan Hu +Stefan Lörwald <10850250+stefanloerwald@users.noreply.github.com> +Stefan Majewsky +Stefan Nica +Stefan Weil +Stephen Day +Steve Lasker +Steven Hanna +Steven Kalt +Steven Taylor +stonezdj +sun jian +Sungho Moon +Sven Dowideit +Sylvain Baubeau +Sylvain DESGRAIS +syntaxkim <40621244+syntaxkim@users.noreply.github.com> +T N +t-eimizu +Tadeusz Dudkiewicz +Tariq Ibrahim +TaylorKanper +Ted Reed +Terin Stock +tgic +Thomas Berger +Thomas Sjögren +Thomas Way +Tianon Gravi +Tibor +Tibor Vass +tifayuki +Tiger Kaovilai +Tobias Fuhrimann +Tobias Schwab +Tom Hayward +Tom Hu +tomoya-kawaguchi +Tonis Tiigi +Tony Holdstock-Brown +Tosone +Trapier Marshall +Trevor Pounds +Trevor Wood +Troels Thomsen +uhayate +Usha Mandya <47779042+usha-mandya@users.noreply.github.com> +Usha Mandya +Vadim Bauer +Vaidas Jablonskis +Vega Chou +Veres Lajos +Victor Barbu +Victor Vieux +Victoria Bialas +Vidar +Viktor Stanchev +Vincent Batts +Vincent Demeester +Vincent Giersch +Vishesh Jindal +vitshev +Vitshev +W. Trevor King +Wang Jie +Wang Yan +Wassim Dhif +wayne +Wei Fu +Wei Meng +weiyuan.yl +Wen-Quan Li +Wenkai Yin +william wei <1342247033@qq.com> +xg.song +xiaoxiangxianzi +xiekeyang +Xueshan Feng +Yann ROBERT +Yannick Fricke +yaoyao.xyy +yixi zhang +Yong Tang +Yong Wen Chua +Yongxin Li +Yu Wang +yuexiao-wang +YuJie <390282283@qq.com> +yuzou +Zhang Wei +zhipengzuo +zhouhaibing089 +zounengren +姜继忠 diff --git a/vendor/github.com/distribution/distribution/v3/BUILDING.md b/vendor/github.com/distribution/distribution/v3/BUILDING.md index 1fe2269bf7..5867e2aabd 100644 --- a/vendor/github.com/distribution/distribution/v3/BUILDING.md +++ b/vendor/github.com/distribution/distribution/v3/BUILDING.md @@ -7,31 +7,29 @@ This is useful if you intend to actively work on the registry. ### Alternatives -Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/). +Most people should use prebuilt images, for example, the [Registry docker image](https://hub.docker.com/r/library/registry/) provided by Docker. People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`. 
-OS X users who want to run natively can do so following [the instructions here](https://github.com/docker/docker.github.io/blob/master/registry/recipes/osx-setup-guide.md). +The latest updates to `main` branch are automatically pushed to [distribution Docker Hub repository](https://hub.docker.com/r/distribution/distribution) and tagged with `edge` tag. ### Gotchas -You are expected to know your way around with go & git. +You are expected to know your way around with `go` & `git`. -If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you. +If you are a casual user with no development experience, and no preliminary knowledge of Go, building from source is probably not a good solution for you. ## Configure the development environment The first prerequisite of properly building distribution targets is to have a Go -development environment setup. Please follow [How to Write Go Code](https://golang.org/doc/code.html) -for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the -environment. +development environment setup. Please follow [How to Write Go Code](https://go.dev/doc/code) for proper setup. Next, fetch the code from the repository using git: git clone https://github.com/distribution/distribution cd distribution -If you are planning to create a pull request with changes, you may want to clone directly from your [fork](https://help.github.com/en/articles/about-forks). +If you are planning to create a pull request with changes, you may want to clone directly from your [fork](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/about-forks). ## Build and run from source @@ -97,13 +95,35 @@ Run `make validate` to run the validators, including the linter and vendor valid ### Optional build tags Optional [build tags](http://golang.org/pkg/go/build/) can be provided using -the environment variable `DOCKER_BUILDTAGS`. +the environment variable `BUILDTAGS`.
-noresumabledigest
-    Compiles without resumable digest support
-include_gcs
-    Adds support for Google Cloud Storage
-include_oss
-    Adds support for Alibaba Cloud Object Storage Service (OSS)
+ +### Local S3 store environment + +You can run an S3 API compatible store locally with [minio](https://min.io/). + +You must have a [docker compose](https://docs.docker.com/compose/) compatible tool installed on your workstation. + +Start the local S3 store environment: +``` +make start-s3-storage +``` +There is a sample registry configuration file that lets you point the registry to the started storage: +``` +AWS_ACCESS_KEY=distribution \ + AWS_SECRET_KEY=password \ + AWS_REGION=us-east-1 \ + S3_BUCKET=images-local \ + S3_ENCRYPT=false \ + REGION_ENDPOINT=http://127.0.0.1:9000 \ + S3_SECURE=false \ +./bin/registry serve tests/conf-local-s3.yml +``` +Stop the local S3 store when done: +``` +make stop-s3-storage +``` diff --git a/vendor/github.com/distribution/distribution/v3/CODE-OF-CONDUCT.md b/vendor/github.com/distribution/distribution/v3/CODE-OF-CONDUCT.md index 21f813523d..48f6704c6d 100644 --- a/vendor/github.com/distribution/distribution/v3/CODE-OF-CONDUCT.md +++ b/vendor/github.com/distribution/distribution/v3/CODE-OF-CONDUCT.md @@ -1,36 +1,5 @@ -This project has adopted the [CNCF Community Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md) +# Code of Conduct -### Contributor Code of Conduct +We follow the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md). -As contributors and maintainers of this project, and in the interest of fostering -an open and welcoming community, we pledge to respect all people who contribute -through reporting issues, posting feature requests, updating documentation, -submitting pull requests or patches, and other activities. - -We are committed to making participation in this project a harassment-free experience for -everyone, regardless of level of experience, gender, gender identity and expression, -sexual orientation, disability, personal appearance, body size, race, ethnicity, age, -religion, or nationality. - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery -* Personal attacks -* Trolling or insulting/derogatory comments -* Public or private harassment -* Publishing others' private information, such as physical or electronic addresses, - without explicit permission -* Other unethical or unprofessional conduct. - -Project maintainers have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions that are not -aligned to this Code of Conduct. By adopting this Code of Conduct, project maintainers -commit themselves to fairly and consistently applying these principles to every aspect -of managing this project. Project maintainers who do not follow or enforce the Code of -Conduct may be permanently removed from the project team. - -This code of conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. - -Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by -contacting a CNCF project maintainer or our mediator, Mishi Choudhary . +Please contact the [CNCF Code of Conduct Committee](mailto:conduct@cncf.io) in order to report violations of the Code of Conduct. 
diff --git a/vendor/github.com/distribution/distribution/v3/Dockerfile b/vendor/github.com/distribution/distribution/v3/Dockerfile index c6ebb78927..8cbce906ba 100644 --- a/vendor/github.com/distribution/distribution/v3/Dockerfile +++ b/vendor/github.com/distribution/distribution/v3/Dockerfile @@ -1,8 +1,8 @@ # syntax=docker/dockerfile:1 -ARG GO_VERSION=1.19.9 -ARG ALPINE_VERSION=3.16 -ARG XX_VERSION=1.2.1 +ARG GO_VERSION=1.23.7 +ARG ALPINE_VERSION=3.21 +ARG XX_VERSION=1.6.1 FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS base @@ -16,18 +16,18 @@ FROM base AS version ARG PKG=github.com/distribution/distribution/v3 RUN --mount=target=. \ VERSION=$(git describe --match 'v[0-9]*' --dirty='.m' --always --tags) REVISION=$(git rev-parse HEAD)$(if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi); \ - echo "-X ${PKG}/version.Version=${VERSION#v} -X ${PKG}/version.Revision=${REVISION} -X ${PKG}/version.Package=${PKG}" | tee /tmp/.ldflags; \ + echo "-X ${PKG}/version.version=${VERSION#v} -X ${PKG}/version.revision=${REVISION} -X ${PKG}/version.mainpkg=${PKG}" | tee /tmp/.ldflags; \ echo -n "${VERSION}" | tee /tmp/.version; FROM base AS build ARG TARGETPLATFORM ARG LDFLAGS="-s -w" -ARG BUILDTAGS="include_oss include_gcs" -RUN --mount=type=bind,target=/src,rw \ +ARG BUILDTAGS="" +RUN --mount=type=bind,target=/src \ --mount=type=cache,target=/root/.cache/go-build \ --mount=target=/go/pkg/mod,type=cache \ --mount=type=bind,source=/tmp/.ldflags,target=/tmp/.ldflags,from=version \ - set -x ; xx-go build -trimpath -ldflags "$(cat /tmp/.ldflags) ${LDFLAGS}" -o /usr/bin/registry ./cmd/registry \ + set -x ; xx-go build -tags "${BUILDTAGS}" -trimpath -ldflags "$(cat /tmp/.ldflags) ${LDFLAGS}" -o /usr/bin/registry ./cmd/registry \ && xx-verify --static /usr/bin/registry FROM scratch AS binary @@ -52,9 +52,9 @@ COPY --from=releaser /out / FROM alpine:${ALPINE_VERSION} RUN apk add --no-cache ca-certificates -COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml +COPY cmd/registry/config-dev.yml /etc/distribution/config.yml COPY --from=binary /registry /bin/registry VOLUME ["/var/lib/registry"] EXPOSE 5000 ENTRYPOINT ["registry"] -CMD ["serve", "/etc/docker/registry/config.yml"] +CMD ["serve", "/etc/distribution/config.yml"] diff --git a/vendor/github.com/distribution/distribution/v3/GOVERNANCE.md b/vendor/github.com/distribution/distribution/v3/GOVERNANCE.md index bd40c44af2..17067f7a85 100644 --- a/vendor/github.com/distribution/distribution/v3/GOVERNANCE.md +++ b/vendor/github.com/distribution/distribution/v3/GOVERNANCE.md @@ -94,7 +94,7 @@ performance must not be discussed on the pull request. ## How are decisions made? -Docker distribution is an open-source project with an open design philosophy. +CNCF distribution is an open-source project with an open design philosophy. This means that the repository is the source of truth for EVERY aspect of the project, including its philosophy, design, road map, and APIs. *If it's part of the project, it's in the repo. 
If it's in the repo, it's part of the project.* diff --git a/vendor/github.com/distribution/distribution/v3/MAINTAINERS b/vendor/github.com/distribution/distribution/v3/MAINTAINERS index f6147b8b99..f5c1494bdd 100644 --- a/vendor/github.com/distribution/distribution/v3/MAINTAINERS +++ b/vendor/github.com/distribution/distribution/v3/MAINTAINERS @@ -11,10 +11,9 @@ "heww","He Weiwei","hweiwei@vmware.com" "joaodrp","João Pereira","jpereira@gitlab.com" "justincormack","Justin Cormack","justin.cormack@docker.com" -"squizzi","Kyle Squizzato","ksquizzato@mirantis.com" -"milosgajdos","Milos Gajdos","milos.gajdos@docker.com" +"squizzi","Kyle Squizzato","ksquizz@gmail.com" +"milosgajdos","Milos Gajdos","milosthegajdos@gmail.com" "sargun","Sargun Dhillon","sargun@sargun.me" -"waynr","Wayne Warren","wwarren@digitalocean.com" "wy65701436","Wang Yan","wangyan@vmware.com" "stevelasker","Steve Lasker","steve.lasker@microsoft.com" # @@ -23,3 +22,5 @@ "dmcgowan","Derek McGowan","derek@mcgstyle.net" "stevvooe","Stephen Day","stevvooe@gmail.com" "thajeztah","Sebastiaan van Stijn","github@gone.nl" +"DavidSpek", "David van der Spek", "vanderspek.david@gmail.com" +"Jamstah", "James Hewitt", "james.hewitt@gmail.com" diff --git a/vendor/github.com/distribution/distribution/v3/Makefile b/vendor/github.com/distribution/distribution/v3/Makefile index 42db005ed9..9647ed9a02 100644 --- a/vendor/github.com/distribution/distribution/v3/Makefile +++ b/vendor/github.com/distribution/distribution/v3/Makefile @@ -1,3 +1,5 @@ +.DEFAULT_GOAL := help + # Root directory of the project (absolute path). ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST)))) @@ -5,6 +7,8 @@ ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST)))) VERSION ?= $(shell git describe --match 'v[0-9]*' --dirty='.m' --always) REVISION ?= $(shell git rev-parse HEAD)$(shell if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi) +# default compose command +COMPOSE ?= docker compose PKG=github.com/distribution/distribution/v3 @@ -13,6 +17,9 @@ PACKAGES=$(shell go list -tags "${BUILDTAGS}" ./... | grep -v /vendor/) INTEGRATION_PACKAGE=${PKG} COVERAGE_PACKAGES=$(filter-out ${PKG}/registry/storage/driver/%,${PACKAGES}) +IMAGE_REPO ?= distribution/distribution +IMAGE_TAG ?= latest +IMAGE_NAME ?= $(IMAGE_REPO):$(IMAGE_TAG) # Project binaries. COMMANDS=registry digest registry-api-descriptor-template @@ -30,7 +37,7 @@ WHALE = "+" TESTFLAGS_RACE= GOFILES=$(shell find . -type f -name '*.go') GO_TAGS=$(if $(BUILDTAGS),-tags "$(BUILDTAGS)",) -GO_LDFLAGS=-ldflags '-s -w -X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PKG) $(EXTRA_LDFLAGS)' +GO_LDFLAGS=-ldflags '-extldflags "-Wl,-z,now" -s -w -X $(PKG)/version.version=$(VERSION) -X $(PKG)/version.revision=$(REVISION) -X $(PKG)/version.mainpkg=$(PKG) $(EXTRA_LDFLAGS)' BINARIES=$(addprefix bin/,$(COMMANDS)) @@ -38,16 +45,52 @@ BINARIES=$(addprefix bin/,$(COMMANDS)) TESTFLAGS ?= -v $(TESTFLAGS_RACE) TESTFLAGS_PARALLEL ?= 8 -.PHONY: all build binaries clean test test-race test-full integration coverage validate lint validate-git validate-vendor vendor mod-outdated +.PHONY: all build binaries clean test test-race test-full integration test-coverage validate lint validate-git validate-vendor vendor mod-outdated image validate-authors authors .DEFAULT: all -all: binaries +.PHONY: FORCE +FORCE: + +##@ Build # This only needs to be generated by hand when cutting full releases. 
version/version.go: @echo "$(WHALE) $@" ./version/version.sh > $@ +bin/%: cmd/% FORCE ## build individual binary + @echo "$(WHALE) $@${BINARY_SUFFIX}" + @go build -buildmode=pie ${GO_GCFLAGS} ${GO_BUILD_FLAGS} -o $@${BINARY_SUFFIX} ${GO_LDFLAGS} --ldflags '-extldflags "-Wl,-z,now" -s' ${GO_TAGS} ./$< + +binaries: $(BINARIES) ## build binaries + @echo "$(WHALE) $@" + +build: ## build go packages + @echo "$(WHALE) $@" + @go build -buildmode=pie ${GO_GCFLAGS} ${GO_BUILD_FLAGS} ${GO_LDFLAGS} --ldflags '-extldflags "-Wl,-z,now" -s' ${GO_TAGS} $(PACKAGES) + +image: ## build docker image IMAGE_NAME= + docker buildx bake --set "*.tags=${IMAGE_NAME}" image-local + +clean: ## clean up binaries + @echo "$(WHALE) $@" + @rm -f $(BINARIES) + +vendor: ## update vendor + $(eval $@_TMP_OUT := $(shell mktemp -d -t buildx-output.XXXXXXXXXX)) + docker buildx bake --set "*.output=$($@_TMP_OUT)" update-vendor + rm -rf ./vendor + cp -R "$($@_TMP_OUT)"/out/* . + rm -rf $($@_TMP_OUT)/* + +mod-outdated: ## check outdated dependencies + docker buildx bake $@ + +authors: ## generate authors + docker buildx bake $@ + +##@ Test + test: ## run tests, except integration test with test.short @echo "$(WHALE) $@" @go test ${GO_TAGS} -test.short ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) @@ -64,7 +107,7 @@ integration: ## run integration tests @echo "$(WHALE) $@" @go test ${TESTFLAGS} -parallel ${TESTFLAGS_PARALLEL} ${INTEGRATION_PACKAGE} -coverage: ## generate coverprofiles from the unit tests +test-coverage: ## run unit tests and generate test coverprofiles @echo "$(WHALE) $@" @rm -f coverage.txt @go test ${GO_TAGS} -i ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${COVERAGE_PACKAGES}) 2> /dev/null @@ -79,28 +122,72 @@ coverage: ## generate coverprofiles from the unit tests fi; \ done ) -FORCE: - -# Build a binary from a cmd. -bin/%: cmd/% FORCE - @echo "$(WHALE) $@${BINARY_SUFFIX}" - @go build ${GO_GCFLAGS} ${GO_BUILD_FLAGS} -o $@${BINARY_SUFFIX} ${GO_LDFLAGS} ${GO_TAGS} ./$< - -binaries: $(BINARIES) ## build binaries - @echo "$(WHALE) $@" - -build: - @echo "$(WHALE) $@" - @go build ${GO_GCFLAGS} ${GO_BUILD_FLAGS} ${GO_LDFLAGS} ${GO_TAGS} $(PACKAGES) - -clean: ## clean up binaries - @echo "$(WHALE) $@" - @rm -f $(BINARIES) +.PHONY: test-s3-storage +test-s3-storage: start-s3-storage run-s3-tests stop-s3-storage ## run s3 storage driver tests + +.PHONY: start-s3-storage +start-s3-storage: ## start local s3 storage (minio) + $(COMPOSE) -f tests/docker-compose-storage.yml up minio minio-init -d + +.PHONY: stop-s3-storage +stop-s3-storage: ## stop local s3 storage (minio) + $(COMPOSE) -f tests/docker-compose-storage.yml down + +.PHONY: reset-s3-storage +reset-s3-storage: ## reset (stop, delete, start) local s3 storage (minio) + $(COMPOSE) -f tests/docker-compose-storage.yml down + @mkdir -p tests/miniodata/distribution + @rm -rf tests/miniodata/distribution/* tests/miniodata/.minio.sys + $(COMPOSE) -f tests/docker-compose-storage.yml up minio minio-init -d + +.PHONY: run-s3-tests +run-s3-tests: start-s3-storage ## run S3 storage driver integration tests + AWS_ACCESS_KEY=distribution \ + AWS_SECRET_KEY=password \ + AWS_REGION=us-east-1 \ + S3_BUCKET=images-local \ + S3_ENCRYPT=false \ + REGION_ENDPOINT=http://127.0.0.1:9000 \ + S3_SECURE=false \ + S3_ACCELERATE=false \ + AWS_S3_FORCE_PATH_STYLE=true \ + go test ${TESTFLAGS} -count=1 ./registry/storage/driver/s3-aws/... 
+ +.PHONY: start-e2e-s3-env +start-e2e-s3-env: ## starts E2E S3 storage test environment (S3, Redis, registry) + $(COMPOSE) -f tests/docker-compose-e2e-cloud-storage.yml up -d + +.PHONY: stop-e2e-s3-env +stop-e2e-s3-env: ## stops E2E S3 storage test environment (S3, Redis, registry) + $(COMPOSE) -f tests/docker-compose-e2e-cloud-storage.yml down + +.PHONY: test-azure-storage +test-azure-storage: start-azure-storage run-azure-tests stop-azure-storage ## run Azure storage driver tests + +.PHONY: start-azure-storage +start-azure-storage: ## start local Azure storage (Azurite) + $(COMPOSE) -f tests/docker-compose-azure-blob-store.yaml up azurite azurite-init -d + +.PHONY: stop-azure-storage +stop-azure-storage: ## stop local Azure storage (minio) + $(COMPOSE) -f tests/docker-compose-azure-blob-store.yaml down + +.PHONY: run-azure-tests +run-azure-tests: start-azure-storage ## run Azure storage driver integration tests + AZURE_SKIP_VERIFY=true \ + AZURE_STORAGE_CREDENTIALS_TYPE="shared_key" \ + AZURE_STORAGE_ACCOUNT_NAME=devstoreaccount1 \ + AZURE_STORAGE_ACCOUNT_KEY="Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==" \ + AZURE_STORAGE_CONTAINER=containername \ + AZURE_SERVICE_URL="https://127.0.0.1:10000/devstoreaccount1" \ + go test ${TESTFLAGS} -count=1 ./registry/storage/driver/azure/... + +##@ Validate -validate: ## run all validators +lint: ## run all linters docker buildx bake $@ -lint: ## run all linters +validate: ## run all validators docker buildx bake $@ validate-git: ## validate git @@ -109,12 +196,12 @@ validate-git: ## validate git validate-vendor: ## validate vendor docker buildx bake $@ -vendor: ## update vendor - $(eval $@_TMP_OUT := $(shell mktemp -d -t buildx-output.XXXXXXXXXX)) - docker buildx bake --set "*.output=$($@_TMP_OUT)" update-vendor - rm -rf ./vendor - cp -R "$($@_TMP_OUT)"/out/* . - rm -rf $($@_TMP_OUT)/* - -mod-outdated: ## check outdated dependencies +validate-authors: ## validate authors docker buildx bake $@ + +.PHONY: help +help: + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z0-9_\/%-]+:.*?##/ { printf " \033[36m%-27s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + @echo "" + @echo "Go binaries: $(BINARIES)" + @echo "Docker image: $(IMAGE_NAME)" diff --git a/vendor/github.com/distribution/distribution/v3/README.md b/vendor/github.com/distribution/distribution/v3/README.md index 3af0093ec4..76f837a3a7 100644 --- a/vendor/github.com/distribution/distribution/v3/README.md +++ b/vendor/github.com/distribution/distribution/v3/README.md @@ -1,9 +1,19 @@ -# Distribution +

+<!-- distribution project logo -->
+
+ +[![Build Status](https://github.com/distribution/distribution/workflows/build/badge.svg?branch=main&event=push)](https://github.com/distribution/distribution/actions/workflows/build.yml?query=workflow%3Abuild) +[![GoDoc](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/github.com/distribution/distribution) +[![License: Apache-2.0](https://img.shields.io/badge/License-Apache--2.0-blue.svg)](LICENSE) +[![codecov](https://codecov.io/gh/distribution/distribution/branch/main/graph/badge.svg)](https://codecov.io/gh/distribution/distribution) +[![FOSSA Status](https://app.fossa.com/api/projects/custom%2B162%2Fgithub.com%2Fdistribution%2Fdistribution.svg?type=shield)](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fdistribution%2Fdistribution?ref=badge_shield) +[![OCI Conformance](https://github.com/distribution/distribution/workflows/conformance/badge.svg)](https://github.com/distribution/distribution/actions?query=workflow%3Aconformance) +[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/distribution/distribution/badge)](https://securityscorecards.dev/viewer/?uri=github.com/distribution/distribution) The toolset to pack, ship, store, and deliver content. This repository's main product is the Open Source Registry implementation -for storing and distributing container images using the +for storing and distributing container images and other content using the [OCI Distribution Specification](https://github.com/opencontainers/distribution-spec). The goal of this project is to provide a simple, secure, and scalable base for building a large scale registry solution or running a simple private registry. @@ -11,22 +21,13 @@ It is a core library for many registry operators including Docker Hub, GitHub Co GitLab Container Registry and DigitalOcean Container Registry, as well as the CNCF Harbor Project, and VMware Harbor Registry. - - -[![Build Status](https://github.com/distribution/distribution/workflows/CI/badge.svg?branch=main&event=push)](https://github.com/distribution/distribution/actions?query=workflow%3ACI) -[![GoDoc](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/github.com/distribution/distribution) -[![License: Apache-2.0](https://img.shields.io/badge/License-Apache--2.0-blue.svg)](LICENSE) -[![codecov](https://codecov.io/gh/distribution/distribution/branch/main/graph/badge.svg)](https://codecov.io/gh/distribution/distribution) -[![FOSSA Status](https://app.fossa.com/api/projects/custom%2B162%2Fgithub.com%2Fdistribution%2Fdistribution.svg?type=shield)](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fdistribution%2Fdistribution?ref=badge_shield) -[![OCI Conformance](https://github.com/distribution/distribution/workflows/conformance/badge.svg)](https://github.com/distribution/distribution/actions?query=workflow%3Aconformance) - This repository contains the following components: |**Component** |Description | |--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | **registry** | An implementation of the [OCI Distribution Specification](https://github.com/opencontainers/distribution-spec). | | **libraries** | A rich set of libraries for interacting with distribution components. 
Please see [godoc](https://pkg.go.dev/github.com/distribution/distribution) for details. **Note**: The interfaces for these libraries are **unstable**. | -| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/) related just to the registry. | +| **documentation** | Full documentation is available at [https://distribution.github.io/distribution](https://distribution.github.io/distribution/). ### How does this integrate with Docker, containerd, and other OCI client? diff --git a/vendor/github.com/distribution/distribution/v3/SECURITY.md b/vendor/github.com/distribution/distribution/v3/SECURITY.md new file mode 100644 index 0000000000..4357295281 --- /dev/null +++ b/vendor/github.com/distribution/distribution/v3/SECURITY.md @@ -0,0 +1,17 @@ +# Security Policy + +## Supported Versions + +These versions are currently receiving security updates. + +| Version | Supported | Notes | +| ------------ | ------------------ | ----- | +| 3.0.x (main) | :white_check_mark: | This is the next major version and has not yet been released. | +| 2.8.x | :white_check_mark: | This is the latest released version. | +| < 2.8 | :x: | | + +## Reporting a Vulnerability + +The maintainers take security seriously. If you discover a security issue, please bring it to their attention right away! + +Please DO NOT file a public issue, instead send your report privately to cncf-distribution-security@lists.cncf.io. diff --git a/vendor/github.com/distribution/distribution/v3/blobs.go b/vendor/github.com/distribution/distribution/v3/blobs.go index b7583cc7b6..3de30b5920 100644 --- a/vendor/github.com/distribution/distribution/v3/blobs.go +++ b/vendor/github.com/distribution/distribution/v3/blobs.go @@ -8,7 +8,7 @@ import ( "net/http" "time" - "github.com/distribution/distribution/v3/reference" + "github.com/distribution/reference" "github.com/opencontainers/go-digest" v1 "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -46,7 +46,7 @@ func (err ErrBlobInvalidDigest) Error() string { // instead of initiating an upload session. type ErrBlobMounted struct { From reference.Canonical - Descriptor Descriptor + Descriptor v1.Descriptor } func (err ErrBlobMounted) Error() string { @@ -58,41 +58,9 @@ func (err ErrBlobMounted) Error() string { // store, a descriptor can be used to fetch, store and target any kind of // blob. The struct also describes the wire protocol format. Fields should // only be added but never changed. -type Descriptor struct { - // MediaType describe the type of the content. All text based formats are - // encoded as utf-8. - MediaType string `json:"mediaType,omitempty"` - - // Digest uniquely identifies the content. A byte stream can be verified - // against this digest. - Digest digest.Digest `json:"digest,omitempty"` - - // Size in bytes of content. - Size int64 `json:"size,omitempty"` - - // URLs contains the source URLs of this content. - URLs []string `json:"urls,omitempty"` - - // Annotations contains arbitrary metadata relating to the targeted content. - Annotations map[string]string `json:"annotations,omitempty"` - - // Platform describes the platform which the image in the manifest runs on. - // This should only be used when referring to a manifest. - Platform *v1.Platform `json:"platform,omitempty"` - - // NOTE: Before adding a field here, please ensure that all - // other options have been exhausted. Much of the type relationships - // depend on the simplicity of this type. 
-} - -// Descriptor returns the descriptor, to make it satisfy the Describable -// interface. Note that implementations of Describable are generally objects -// which can be described, not simply descriptors; this exception is in place -// to make it more convenient to pass actual descriptors to functions that -// expect Describable objects. -func (d Descriptor) Descriptor() Descriptor { - return d -} +// +// Descriptor is an alias for [v1.Descriptor]. +type Descriptor = v1.Descriptor // BlobStatter makes blob descriptors available by digest. The service may // provide a descriptor of a different digest if the provided digest is not @@ -100,7 +68,7 @@ func (d Descriptor) Descriptor() Descriptor { type BlobStatter interface { // Stat provides metadata about a blob identified by the digest. If the // blob is unknown to the describer, ErrBlobUnknown will be returned. - Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error) + Stat(ctx context.Context, dgst digest.Digest) (v1.Descriptor, error) } // BlobDeleter enables deleting blobs from storage. @@ -129,7 +97,7 @@ type BlobDescriptorService interface { // Such a facility can be used to map blobs between digest domains, with // the restriction that the algorithm of the descriptor must match the // canonical algorithm (ie sha256) of the annotator. - SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error + SetDescriptor(ctx context.Context, dgst digest.Digest, desc v1.Descriptor) error // Clear enables descriptors to be unlinked Clear(ctx context.Context, dgst digest.Digest) error @@ -140,12 +108,6 @@ type BlobDescriptorServiceFactory interface { BlobAccessController(svc BlobDescriptorService) BlobDescriptorService } -// ReadSeekCloser is the primary reader type for blob data, combining -// io.ReadSeeker with io.Closer. -// -// Deprecated: use [io.ReadSeekCloser]. -type ReadSeekCloser = io.ReadSeekCloser - // BlobProvider describes operations for getting blob data. type BlobProvider interface { // Get returns the entire blob identified by digest along with the descriptor. @@ -177,7 +139,7 @@ type BlobServer interface { type BlobIngester interface { // Put inserts the content p into the blob service, returning a descriptor // or an error. - Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error) + Put(ctx context.Context, mediaType string, p []byte) (v1.Descriptor, error) // Create allocates a new blob writer to add a blob to this service. The // returned handle can be written to and later resumed using an opaque @@ -206,7 +168,7 @@ type CreateOptions struct { From reference.Canonical // Stat allows to pass precalculated descriptor to link and return. // Blob access check will be skipped if set. - Stat *Descriptor + Stat *v1.Descriptor } } @@ -236,7 +198,7 @@ type BlobWriter interface { // stream" to the blob. The returned descriptor may have a different // digest depending on the blob store, referred to as the canonical // descriptor. - Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error) + Commit(ctx context.Context, provisional v1.Descriptor) (canonical v1.Descriptor, err error) // Cancel ends the blob write without storing any data and frees any // associated resources. Any data written thus far will be lost. 
Cancel diff --git a/vendor/github.com/distribution/distribution/v3/doc.go b/vendor/github.com/distribution/distribution/v3/doc.go index bdd8cb708e..d28c892df7 100644 --- a/vendor/github.com/distribution/distribution/v3/doc.go +++ b/vendor/github.com/distribution/distribution/v3/doc.go @@ -1,6 +1,6 @@ // Package distribution will define the interfaces for the components of // docker distribution. The goal is to allow users to reliably package, ship -// and store content related to docker images. +// and store content related to container images. // // This is currently a work in progress. More details are available in the // README.md. diff --git a/vendor/github.com/distribution/distribution/v3/docker-bake.hcl b/vendor/github.com/distribution/distribution/v3/docker-bake.hcl index db9fa1f516..2d5aba76ad 100644 --- a/vendor/github.com/distribution/distribution/v3/docker-bake.hcl +++ b/vendor/github.com/distribution/distribution/v3/docker-bake.hcl @@ -39,11 +39,7 @@ target "update-vendor" { target "mod-outdated" { dockerfile = "./dockerfiles/vendor.Dockerfile" target = "outdated" - args = { - // used to invalidate cache for outdated run stage - // can be dropped when https://github.com/moby/buildkit/issues/1213 fixed - _RANDOM = uuidv4() - } + no-cache-filter = ["outdated"] output = ["type=cacheonly"] } @@ -65,7 +61,8 @@ target "artifact-all" { "linux/arm/v7", "linux/arm64", "linux/ppc64le", - "linux/s390x" + "linux/s390x", + "linux/riscv64" ] } @@ -91,6 +88,42 @@ target "image-all" { "linux/arm/v7", "linux/arm64", "linux/ppc64le", - "linux/s390x" + "linux/s390x", + "linux/riscv64" ] } + +target "_common_docs" { + dockerfile = "./dockerfiles/docs.Dockerfile" +} + +target "docs-export" { + inherits = ["_common_docs"] + target = "out" + output = ["type=local,dest=build/docs"] +} + +target "docs-image" { + inherits = ["_common_docs"] + target = "server" + output = ["type=docker"] + tags = ["registry-docs:local"] +} + +target "docs-test" { + inherits = ["_common_docs"] + target = "test" + output = ["type=cacheonly"] +} + +target "authors" { + dockerfile = "./dockerfiles/authors.Dockerfile" + target = "update" + output = ["."] +} + +target "validate-authors" { + dockerfile = "./dockerfiles/authors.Dockerfile" + target = "validate" + output = ["type=cacheonly"] +} diff --git a/vendor/github.com/distribution/distribution/v3/errors.go b/vendor/github.com/distribution/distribution/v3/errors.go index 8e0b788d6c..4fbfc85285 100644 --- a/vendor/github.com/distribution/distribution/v3/errors.go +++ b/vendor/github.com/distribution/distribution/v3/errors.go @@ -90,7 +90,7 @@ func (ErrManifestUnverified) Error() string { type ErrManifestVerification []error func (errs ErrManifestVerification) Error() string { - var parts []string + parts := make([]string, 0, len(errs)) for _, err := range errs { parts = append(parts, err.Error()) } diff --git a/vendor/github.com/distribution/distribution/v3/context/context.go b/vendor/github.com/distribution/distribution/v3/internal/dcontext/context.go similarity index 94% rename from vendor/github.com/distribution/distribution/v3/context/context.go rename to vendor/github.com/distribution/distribution/v3/internal/dcontext/context.go index 0a199615ff..fe8379800b 100644 --- a/vendor/github.com/distribution/distribution/v3/context/context.go +++ b/vendor/github.com/distribution/distribution/v3/internal/dcontext/context.go @@ -1,10 +1,10 @@ -package context +package dcontext import ( "context" "sync" - "github.com/distribution/distribution/v3/uuid" + "github.com/google/uuid" ) 
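One notable API change in the vendored `distribution/distribution/v3` code above: `distribution.Descriptor` is now a type alias for the OCI image-spec descriptor, so values can flow between the two APIs without conversion. A small illustrative sketch (the `describe` helper is hypothetical):

```go
package main

import (
	"fmt"

	"github.com/distribution/distribution/v3"
	"github.com/opencontainers/go-digest"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// describe takes the distribution type; since it aliases v1.Descriptor,
// an OCI descriptor value can be passed in without conversion.
func describe(d distribution.Descriptor) string {
	return fmt.Sprintf("%s %s (%d bytes)", d.MediaType, d.Digest, d.Size)
}

func main() {
	desc := v1.Descriptor{
		MediaType: v1.MediaTypeImageManifest,
		Digest:    digest.FromString("example"),
		Size:      int64(len("example")),
	}
	fmt.Println(describe(desc)) // same underlying type on both sides
}
```

The signature changes to `BlobStatter`, `BlobIngester`, and `BlobWriter` in the hunks above are this same substitution applied mechanically.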
// instanceContext is a context that provides only an instance id. It is @@ -22,7 +22,7 @@ func (ic *instanceContext) Value(key interface{}) interface{} { // call a random generator from the package initialization // code. For various reasons random could not be available // https://github.com/distribution/distribution/issues/782 - ic.id = uuid.Generate().String() + ic.id = uuid.NewString() }) return ic.id } diff --git a/vendor/github.com/distribution/distribution/v3/context/doc.go b/vendor/github.com/distribution/distribution/v3/internal/dcontext/doc.go similarity index 93% rename from vendor/github.com/distribution/distribution/v3/context/doc.go rename to vendor/github.com/distribution/distribution/v3/internal/dcontext/doc.go index 51376dd69f..a8d9a74021 100644 --- a/vendor/github.com/distribution/distribution/v3/context/doc.go +++ b/vendor/github.com/distribution/distribution/v3/internal/dcontext/doc.go @@ -1,16 +1,16 @@ -// Package context provides several utilities for working with +// Package dcontext provides several utilities for working with // Go's context in http requests. Primarily, the focus is on logging relevant // request information but this package is not limited to that purpose. // // The easiest way to get started is to get the background context: // -// ctx := context.Background() +// ctx := dcontext.Background() // // The returned context should be passed around your application and be the // root of all other context instances. If the application has a version, this // line should be called before anything else: // -// ctx := context.WithVersion(context.Background(), version) +// ctx := dcontext.WithVersion(dcontext.Background(), version) // // The above will store the version in the context and will be available to // the logger. @@ -27,7 +27,7 @@ // the context and reported with the logger. The following example would // return a logger that prints the version with each log message: // -// ctx := context.Context(context.Background(), "version", version) +// ctx := context.WithValue(dcontext.Background(), "version", version) // GetLogger(ctx, "version").Infof("this log message has a version field") // // The above would print out a log message like this: @@ -85,4 +85,4 @@ // can be traced in log messages. Using the fields like "http.request.id", one // can analyze call flow for a particular request with a simple grep of the // logs. -package context +package dcontext diff --git a/vendor/github.com/distribution/distribution/v3/context/http.go b/vendor/github.com/distribution/distribution/v3/internal/dcontext/http.go similarity index 82% rename from vendor/github.com/distribution/distribution/v3/context/http.go rename to vendor/github.com/distribution/distribution/v3/internal/dcontext/http.go index 68f977ce0d..84d5b4744b 100644 --- a/vendor/github.com/distribution/distribution/v3/context/http.go +++ b/vendor/github.com/distribution/distribution/v3/internal/dcontext/http.go @@ -1,17 +1,16 @@ -package context +package dcontext import ( "context" "errors" - "net" "net/http" "strings" "sync" "time" - "github.com/distribution/distribution/v3/uuid" + "github.com/distribution/distribution/v3/internal/requestutil" + "github.com/google/uuid" "github.com/gorilla/mux" - log "github.com/sirupsen/logrus" ) // Common errors used with this package. 
@@ -20,48 +19,6 @@ var ( ErrNoResponseWriterContext = errors.New("no http response in context") ) -func parseIP(ipStr string) net.IP { - ip := net.ParseIP(ipStr) - if ip == nil { - log.Warnf("invalid remote IP address: %q", ipStr) - } - return ip -} - -// RemoteAddr extracts the remote address of the request, taking into -// account proxy headers. -func RemoteAddr(r *http.Request) string { - if prior := r.Header.Get("X-Forwarded-For"); prior != "" { - remoteAddr, _, _ := strings.Cut(prior, ",") - remoteAddr = strings.Trim(remoteAddr, " ") - if parseIP(remoteAddr) != nil { - return remoteAddr - } - } - // X-Real-Ip is less supported, but worth checking in the - // absence of X-Forwarded-For - if realIP := r.Header.Get("X-Real-Ip"); realIP != "" { - if parseIP(realIP) != nil { - return realIP - } - } - - return r.RemoteAddr -} - -// RemoteIP extracts the remote IP of the request, taking into -// account proxy headers. -func RemoteIP(r *http.Request) string { - addr := RemoteAddr(r) - - // Try parsing it as "IP:port" - if ip, _, err := net.SplitHostPort(addr); err == nil { - return ip - } - - return addr -} - // WithRequest places the request on the context. The context of the request // is assigned a unique id, available at "http.request.id". The request itself // is available at "http.request". Other common attributes are available under @@ -78,21 +35,11 @@ func WithRequest(ctx context.Context, r *http.Request) context.Context { return &httpRequestContext{ Context: ctx, startedAt: time.Now(), - id: uuid.Generate().String(), + id: uuid.NewString(), r: r, } } -// GetRequest returns the http request in the given context. Returns -// ErrNoRequestContext if the context does not have an http request associated -// with it. -func GetRequest(ctx context.Context) (*http.Request, error) { - if r, ok := ctx.Value("http.request").(*http.Request); r != nil && ok { - return r, nil - } - return nil, ErrNoRequestContext -} - // GetRequestID attempts to resolve the current request id, if possible. An // error is return if it is not available on the context. 
func GetRequestID(ctx context.Context) string { @@ -193,7 +140,7 @@ func (ctx *httpRequestContext) Value(key interface{}) interface{} { case "http.request.uri": return ctx.r.RequestURI case "http.request.remoteaddr": - return RemoteAddr(ctx.r) + return requestutil.RemoteAddr(ctx.r) case "http.request.method": return ctx.r.Method case "http.request.host": diff --git a/vendor/github.com/distribution/distribution/v3/context/logger.go b/vendor/github.com/distribution/distribution/v3/internal/dcontext/logger.go similarity index 99% rename from vendor/github.com/distribution/distribution/v3/context/logger.go rename to vendor/github.com/distribution/distribution/v3/internal/dcontext/logger.go index f956a22820..058fc83104 100644 --- a/vendor/github.com/distribution/distribution/v3/context/logger.go +++ b/vendor/github.com/distribution/distribution/v3/internal/dcontext/logger.go @@ -1,4 +1,4 @@ -package context +package dcontext import ( "context" diff --git a/vendor/github.com/distribution/distribution/v3/context/trace.go b/vendor/github.com/distribution/distribution/v3/internal/dcontext/trace.go similarity index 96% rename from vendor/github.com/distribution/distribution/v3/context/trace.go rename to vendor/github.com/distribution/distribution/v3/internal/dcontext/trace.go index 5bf16f8fcf..ba24810534 100644 --- a/vendor/github.com/distribution/distribution/v3/context/trace.go +++ b/vendor/github.com/distribution/distribution/v3/internal/dcontext/trace.go @@ -1,11 +1,11 @@ -package context +package dcontext import ( "context" "runtime" "time" - "github.com/distribution/distribution/v3/uuid" + "github.com/google/uuid" ) // WithTrace allocates a traced timing span in a new context. This allows a @@ -46,7 +46,7 @@ func WithTrace(ctx context.Context) (context.Context, func(format string, a ...i f := runtime.FuncForPC(pc) ctx = &traced{ Context: ctx, - id: uuid.Generate().String(), + id: uuid.NewString(), start: time.Now(), parent: GetStringValue(ctx, "trace.id"), fnname: f.Name(), diff --git a/vendor/github.com/distribution/distribution/v3/context/util.go b/vendor/github.com/distribution/distribution/v3/internal/dcontext/util.go similarity index 97% rename from vendor/github.com/distribution/distribution/v3/context/util.go rename to vendor/github.com/distribution/distribution/v3/internal/dcontext/util.go index c462e75632..5b32ba16ff 100644 --- a/vendor/github.com/distribution/distribution/v3/context/util.go +++ b/vendor/github.com/distribution/distribution/v3/internal/dcontext/util.go @@ -1,4 +1,4 @@ -package context +package dcontext import ( "context" diff --git a/vendor/github.com/distribution/distribution/v3/context/version.go b/vendor/github.com/distribution/distribution/v3/internal/dcontext/version.go similarity index 97% rename from vendor/github.com/distribution/distribution/v3/context/version.go rename to vendor/github.com/distribution/distribution/v3/internal/dcontext/version.go index 97cf9d665f..0a0e9cbb6b 100644 --- a/vendor/github.com/distribution/distribution/v3/context/version.go +++ b/vendor/github.com/distribution/distribution/v3/internal/dcontext/version.go @@ -1,4 +1,4 @@ -package context +package dcontext import "context" diff --git a/vendor/github.com/distribution/distribution/v3/internal/requestutil/util.go b/vendor/github.com/distribution/distribution/v3/internal/requestutil/util.go new file mode 100644 index 0000000000..099e3454cb --- /dev/null +++ b/vendor/github.com/distribution/distribution/v3/internal/requestutil/util.go @@ -0,0 +1,51 @@ +package requestutil + +import ( + 
"net" + "net/http" + "strings" + + log "github.com/sirupsen/logrus" +) + +func parseIP(ipStr string) net.IP { + ip := net.ParseIP(ipStr) + if ip == nil { + log.Warnf("invalid remote IP address: %q", ipStr) + } + return ip +} + +// RemoteAddr extracts the remote address of the request, taking into +// account proxy headers. +func RemoteAddr(r *http.Request) string { + if prior := r.Header.Get("X-Forwarded-For"); prior != "" { + remoteAddr, _, _ := strings.Cut(prior, ",") + remoteAddr = strings.Trim(remoteAddr, " ") + if parseIP(remoteAddr) != nil { + return remoteAddr + } + } + // X-Real-Ip is less supported, but worth checking in the + // absence of X-Forwarded-For + if realIP := r.Header.Get("X-Real-Ip"); realIP != "" { + if parseIP(realIP) != nil { + return realIP + } + } + + return r.RemoteAddr +} + +// RemoteIP extracts the remote IP of the request, taking into +// account proxy headers. +func RemoteIP(r *http.Request) string { + addr := RemoteAddr(r) + + // Try parsing it as "IP:port" + if ip, _, err := net.SplitHostPort(addr); err == nil { + return ip + } + + return addr +} diff --git a/vendor/github.com/distribution/distribution/v3/manifest/manifestlist/manifestlist.go b/vendor/github.com/distribution/distribution/v3/manifest/manifestlist/manifestlist.go index 52e67c7318..4752331ab1 100644 --- a/vendor/github.com/distribution/distribution/v3/manifest/manifestlist/manifestlist.go +++ b/vendor/github.com/distribution/distribution/v3/manifest/manifestlist/manifestlist.go @@ -8,6 +8,7 @@ import ( "github.com/distribution/distribution/v3" "github.com/distribution/distribution/v3/manifest" "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go" v1 "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -18,65 +19,37 @@ const ( // SchemaVersion provides a pre-initialized version structure for this // packages version of the manifest. +// +// Deprecated: use [specs.Versioned] and set MediaType on the manifest +// to [MediaTypeManifestList]. +// +//nolint:staticcheck // ignore SA1019: manifest.Versioned is deprecated: var SchemaVersion = manifest.Versioned{ SchemaVersion: 2, MediaType: MediaTypeManifestList, } -// OCISchemaVersion provides a pre-initialized version structure for this -// packages OCIschema version of the manifest. 
-var OCISchemaVersion = manifest.Versioned{ - SchemaVersion: 2, - MediaType: v1.MediaTypeImageIndex, -} - func init() { - manifestListFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { - m := new(DeserializedManifestList) - err := m.UnmarshalJSON(b) - if err != nil { - return nil, distribution.Descriptor{}, err - } - - if m.MediaType != MediaTypeManifestList { - err = fmt.Errorf("mediaType in manifest list should be '%s' not '%s'", - MediaTypeManifestList, m.MediaType) - - return nil, distribution.Descriptor{}, err - } - - dgst := digest.FromBytes(b) - return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifestList}, err - } - err := distribution.RegisterManifestSchema(MediaTypeManifestList, manifestListFunc) - if err != nil { + if err := distribution.RegisterManifestSchema(MediaTypeManifestList, unmarshalManifestList); err != nil { panic(fmt.Sprintf("Unable to register manifest: %s", err)) } +} - imageIndexFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { - if err := validateIndex(b); err != nil { - return nil, distribution.Descriptor{}, err - } - m := new(DeserializedManifestList) - err := m.UnmarshalJSON(b) - if err != nil { - return nil, distribution.Descriptor{}, err - } - - if m.MediaType != "" && m.MediaType != v1.MediaTypeImageIndex { - err = fmt.Errorf("if present, mediaType in image index should be '%s' not '%s'", - v1.MediaTypeImageIndex, m.MediaType) - - return nil, distribution.Descriptor{}, err - } - - dgst := digest.FromBytes(b) - return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: v1.MediaTypeImageIndex}, err +func unmarshalManifestList(b []byte) (distribution.Manifest, v1.Descriptor, error) { + m := &DeserializedManifestList{} + if err := m.UnmarshalJSON(b); err != nil { + return nil, v1.Descriptor{}, err } - err = distribution.RegisterManifestSchema(v1.MediaTypeImageIndex, imageIndexFunc) - if err != nil { - panic(fmt.Sprintf("Unable to register OCI Image Index: %s", err)) + + if m.MediaType != MediaTypeManifestList { + return nil, v1.Descriptor{}, fmt.Errorf("mediaType in manifest list should be '%s' not '%s'", MediaTypeManifestList, m.MediaType) } + + return m, v1.Descriptor{ + Digest: digest.FromBytes(b), + Size: int64(len(b)), + MediaType: MediaTypeManifestList, + }, nil } // PlatformSpec specifies a platform where a particular image manifest is @@ -108,7 +81,7 @@ type PlatformSpec struct { // A ManifestDescriptor references a platform-specific manifest. type ManifestDescriptor struct { - distribution.Descriptor + v1.Descriptor // Platform specifies which platform the manifest pointed to by the // descriptor runs on. @@ -117,7 +90,10 @@ type ManifestDescriptor struct { // ManifestList references manifests for various platforms. type ManifestList struct { - manifest.Versioned + specs.Versioned + + // MediaType is the media type of this schema. + MediaType string `json:"mediaType,omitempty"` // Manifests references a list of manifests Manifests []ManifestDescriptor `json:"manifests"` @@ -125,8 +101,8 @@ type ManifestList struct { // References returns the distribution descriptors for the referenced image // manifests. 
-func (m ManifestList) References() []distribution.Descriptor { - dependencies := make([]distribution.Descriptor, len(m.Manifests)) +func (m ManifestList) References() []v1.Descriptor { + dependencies := make([]v1.Descriptor, len(m.Manifests)) for i := range m.Manifests { dependencies[i] = m.Manifests[i].Descriptor dependencies[i].Platform = &v1.Platform{ @@ -154,23 +130,14 @@ type DeserializedManifestList struct { // DeserializedManifestList which contains the resulting manifest list // and its JSON representation. func FromDescriptors(descriptors []ManifestDescriptor) (*DeserializedManifestList, error) { - var mediaType string - if len(descriptors) > 0 && descriptors[0].Descriptor.MediaType == v1.MediaTypeImageManifest { - mediaType = v1.MediaTypeImageIndex - } else { - mediaType = MediaTypeManifestList - } - - return FromDescriptorsWithMediaType(descriptors, mediaType) + return fromDescriptorsWithMediaType(descriptors, MediaTypeManifestList) } -// FromDescriptorsWithMediaType is for testing purposes, it's useful to be able to specify the media type explicitly -func FromDescriptorsWithMediaType(descriptors []ManifestDescriptor, mediaType string) (*DeserializedManifestList, error) { +// fromDescriptorsWithMediaType is for testing purposes, it's useful to be able to specify the media type explicitly +func fromDescriptorsWithMediaType(descriptors []ManifestDescriptor, mediaType string) (*DeserializedManifestList, error) { m := ManifestList{ - Versioned: manifest.Versioned{ - SchemaVersion: 2, - MediaType: mediaType, - }, + Versioned: specs.Versioned{SchemaVersion: 2}, + MediaType: mediaType, } m.Manifests = make([]ManifestDescriptor, len(descriptors)) @@ -225,22 +192,18 @@ func (m DeserializedManifestList) Payload() (string, []byte, error) { return mediaType, m.canonical, nil } -// unknownDocument represents a manifest, manifest list, or index that has not -// yet been validated -type unknownDocument struct { - Config interface{} `json:"config,omitempty"` - Layers interface{} `json:"layers,omitempty"` -} - -// validateIndex returns an error if the byte slice is invalid JSON or if it +// validateManifestList returns an error if the byte slice is invalid JSON or if it // contains fields that belong to a manifest -func validateIndex(b []byte) error { - var doc unknownDocument +func validateManifestList(b []byte) error { + var doc struct { + Config interface{} `json:"config,omitempty"` + Layers interface{} `json:"layers,omitempty"` + } if err := json.Unmarshal(b, &doc); err != nil { return err } if doc.Config != nil || doc.Layers != nil { - return errors.New("index: expected index but found manifest") + return errors.New("manifestlist: expected list but found manifest") } return nil } diff --git a/vendor/github.com/distribution/distribution/v3/manifest/ocischema/builder.go b/vendor/github.com/distribution/distribution/v3/manifest/ocischema/builder.go index 7d99c184f3..584b620f8c 100644 --- a/vendor/github.com/distribution/distribution/v3/manifest/ocischema/builder.go +++ b/vendor/github.com/distribution/distribution/v3/manifest/ocischema/builder.go @@ -5,8 +5,8 @@ import ( "errors" "github.com/distribution/distribution/v3" - "github.com/distribution/distribution/v3/manifest" "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go" v1 "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -20,7 +20,7 @@ type Builder struct { // layers is a list of layer descriptors that gets built by successive // calls to AppendReference. 
- layers []distribution.Descriptor + layers []v1.Descriptor // Annotations contains arbitrary metadata relating to the targeted content. annotations map[string]string @@ -32,7 +32,7 @@ type Builder struct { // NewManifestBuilder is used to build new manifests for the current schema // version. It takes a BlobService so it can publish the configuration blob // as part of the Build process, and annotations. -func NewManifestBuilder(bs distribution.BlobService, configJSON []byte, annotations map[string]string) distribution.ManifestBuilder { +func NewManifestBuilder(bs distribution.BlobService, configJSON []byte, annotations map[string]string) *Builder { mb := &Builder{ bs: bs, configJSON: make([]byte, len(configJSON)), @@ -58,11 +58,9 @@ func (mb *Builder) SetMediaType(mediaType string) error { // Build produces a final manifest from the given references. func (mb *Builder) Build(ctx context.Context) (distribution.Manifest, error) { m := Manifest{ - Versioned: manifest.Versioned{ - SchemaVersion: 2, - MediaType: mb.mediaType, - }, - Layers: make([]distribution.Descriptor, len(mb.layers)), + Versioned: specs.Versioned{SchemaVersion: 2}, + MediaType: mb.mediaType, + Layers: make([]v1.Descriptor, len(mb.layers)), Annotations: mb.annotations, } copy(m.Layers, mb.layers) @@ -96,12 +94,12 @@ func (mb *Builder) Build(ctx context.Context) (distribution.Manifest, error) { } // AppendReference adds a reference to the current ManifestBuilder. -func (mb *Builder) AppendReference(d distribution.Describable) error { - mb.layers = append(mb.layers, d.Descriptor()) +func (mb *Builder) AppendReference(ref v1.Descriptor) error { + mb.layers = append(mb.layers, ref) return nil } // References returns the current references added to this builder. -func (mb *Builder) References() []distribution.Descriptor { +func (mb *Builder) References() []v1.Descriptor { return mb.layers } diff --git a/vendor/github.com/distribution/distribution/v3/manifest/ocischema/index.go b/vendor/github.com/distribution/distribution/v3/manifest/ocischema/index.go new file mode 100644 index 0000000000..add766f3e1 --- /dev/null +++ b/vendor/github.com/distribution/distribution/v3/manifest/ocischema/index.go @@ -0,0 +1,164 @@ +package ocischema + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/distribution/distribution/v3" + "github.com/distribution/distribution/v3/manifest" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go" + v1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +// IndexSchemaVersion provides a pre-initialized version structure for OCI Image +// Indices. +// +// Deprecated: use [specs.Versioned] and set MediaType on the manifest +// to [v1.MediaTypeImageIndex]. 
+// +//nolint:staticcheck // ignore SA1019: manifest.Versioned is deprecated: +var IndexSchemaVersion = manifest.Versioned{ + SchemaVersion: 2, + MediaType: v1.MediaTypeImageIndex, +} + +func init() { + if err := distribution.RegisterManifestSchema(v1.MediaTypeImageIndex, unmarshalImageIndex); err != nil { + panic(fmt.Sprintf("Unable to register OCI Image Index: %s", err)) + } +} + +func unmarshalImageIndex(b []byte) (distribution.Manifest, v1.Descriptor, error) { + if err := validateIndex(b); err != nil { + return nil, v1.Descriptor{}, err + } + + m := &DeserializedImageIndex{} + if err := m.UnmarshalJSON(b); err != nil { + return nil, v1.Descriptor{}, err + } + + if m.MediaType != "" && m.MediaType != v1.MediaTypeImageIndex { + return nil, v1.Descriptor{}, fmt.Errorf("if present, mediaType in image index should be '%s' not '%s'", v1.MediaTypeImageIndex, m.MediaType) + } + + return m, v1.Descriptor{ + MediaType: v1.MediaTypeImageIndex, + Digest: digest.FromBytes(b), + Size: int64(len(b)), + Annotations: m.Annotations, + }, nil +} + +// ImageIndex references manifests for various platforms. +type ImageIndex struct { + specs.Versioned + + // MediaType is the media type of this schema. + MediaType string `json:"mediaType,omitempty"` + + // Manifests references a list of manifests + Manifests []v1.Descriptor `json:"manifests"` + + // Annotations is an optional field that contains arbitrary metadata for the + // image index + Annotations map[string]string `json:"annotations,omitempty"` +} + +// References returns the distribution descriptors for the referenced image +// manifests. +func (ii ImageIndex) References() []v1.Descriptor { + return ii.Manifests +} + +// DeserializedImageIndex wraps ManifestList with a copy of the original +// JSON. +type DeserializedImageIndex struct { + ImageIndex + + // canonical is the canonical byte representation of the Manifest. + canonical []byte +} + +// FromDescriptors takes a slice of descriptors and a map of annotations, and +// returns a DeserializedManifestList which contains the resulting manifest list +// and its JSON representation. If annotations is nil or empty then the +// annotations property will be omitted from the JSON representation. +func FromDescriptors(descriptors []v1.Descriptor, annotations map[string]string) (*DeserializedImageIndex, error) { + return fromDescriptorsWithMediaType(descriptors, annotations, v1.MediaTypeImageIndex) +} + +// fromDescriptorsWithMediaType is for testing purposes, it's useful to be able to specify the media type explicitly +func fromDescriptorsWithMediaType(descriptors []v1.Descriptor, annotations map[string]string, mediaType string) (_ *DeserializedImageIndex, err error) { + m := ImageIndex{ + Versioned: specs.Versioned{SchemaVersion: 2}, + MediaType: mediaType, + Annotations: annotations, + } + + m.Manifests = make([]v1.Descriptor, len(descriptors)) + copy(m.Manifests, descriptors) + + deserialized := DeserializedImageIndex{ + ImageIndex: m, + } + + deserialized.canonical, err = json.MarshalIndent(&m, "", " ") + return &deserialized, err +} + +// UnmarshalJSON populates a new ManifestList struct from JSON data. 
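The new ocischema/index.go above takes over OCI image index handling from manifestlist. As a rough usage sketch, not part of the diff and with the descriptor and annotation values invented for illustration, a caller could assemble and serialize an index through the new FromDescriptors helper:

```go
package main

import (
	"fmt"

	"github.com/distribution/distribution/v3/manifest/ocischema"
	"github.com/opencontainers/go-digest"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// Hypothetical per-platform manifest descriptors.
	descriptors := []v1.Descriptor{{
		MediaType: v1.MediaTypeImageManifest,
		Digest:    digest.FromString("placeholder amd64 manifest"),
		Size:      1234,
		Platform:  &v1.Platform{OS: "linux", Architecture: "amd64"},
	}}

	// FromDescriptors builds a DeserializedImageIndex whose media type is
	// application/vnd.oci.image.index.v1+json.
	idx, err := ocischema.FromDescriptors(descriptors, map[string]string{
		"org.example.note": "illustrative annotation", // hypothetical key
	})
	if err != nil {
		panic(err)
	}

	mediaType, payload, err := idx.Payload()
	if err != nil {
		panic(err)
	}
	fmt.Println(mediaType, len(payload))
}
```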
+func (m *DeserializedImageIndex) UnmarshalJSON(b []byte) error { + m.canonical = make([]byte, len(b)) + // store manifest list in canonical + copy(m.canonical, b) + + // Unmarshal canonical JSON into ManifestList object + var manifestList ImageIndex + if err := json.Unmarshal(m.canonical, &manifestList); err != nil { + return err + } + + m.ImageIndex = manifestList + + return nil +} + +// MarshalJSON returns the contents of canonical. If canonical is empty, +// marshals the inner contents. +func (m *DeserializedImageIndex) MarshalJSON() ([]byte, error) { + if len(m.canonical) > 0 { + return m.canonical, nil + } + + return nil, errors.New("JSON representation not initialized in DeserializedImageIndex") +} + +// Payload returns the raw content of the manifest list. The contents can be +// used to calculate the content identifier. +func (m DeserializedImageIndex) Payload() (string, []byte, error) { + mediaType := m.MediaType + if m.MediaType == "" { + mediaType = v1.MediaTypeImageIndex + } + + return mediaType, m.canonical, nil +} + +// validateIndex returns an error if the byte slice is invalid JSON or if it +// contains fields that belong to a manifest +func validateIndex(b []byte) error { + var doc struct { + Config interface{} `json:"config,omitempty"` + Layers interface{} `json:"layers,omitempty"` + } + if err := json.Unmarshal(b, &doc); err != nil { + return err + } + if doc.Config != nil || doc.Layers != nil { + return errors.New("index: expected index but found manifest") + } + return nil +} diff --git a/vendor/github.com/distribution/distribution/v3/manifest/ocischema/manifest.go b/vendor/github.com/distribution/distribution/v3/manifest/ocischema/manifest.go index 40b0bd8390..2009e2f5ac 100644 --- a/vendor/github.com/distribution/distribution/v3/manifest/ocischema/manifest.go +++ b/vendor/github.com/distribution/distribution/v3/manifest/ocischema/manifest.go @@ -8,61 +8,74 @@ import ( "github.com/distribution/distribution/v3" "github.com/distribution/distribution/v3/manifest" "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go" v1 "github.com/opencontainers/image-spec/specs-go/v1" ) -// SchemaVersion provides a pre-initialized version structure for this -// packages version of the manifest. +// SchemaVersion provides a pre-initialized version structure for OCI Image +// Manifests. +// +// Deprecated: use [specs.Versioned] and set MediaType on the manifest +// to [v1.MediaTypeImageManifest]. +// +//nolint:staticcheck // ignore SA1019: manifest.Versioned is deprecated: var SchemaVersion = manifest.Versioned{ - SchemaVersion: 2, // historical value here.. 
does not pertain to OCI or docker version + SchemaVersion: 2, MediaType: v1.MediaTypeImageManifest, } func init() { - ocischemaFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { - if err := validateManifest(b); err != nil { - return nil, distribution.Descriptor{}, err - } - m := new(DeserializedManifest) - err := m.UnmarshalJSON(b) - if err != nil { - return nil, distribution.Descriptor{}, err - } - - dgst := digest.FromBytes(b) - return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: v1.MediaTypeImageManifest}, err - } - err := distribution.RegisterManifestSchema(v1.MediaTypeImageManifest, ocischemaFunc) - if err != nil { + if err := distribution.RegisterManifestSchema(v1.MediaTypeImageManifest, unmarshalOCISchema); err != nil { panic(fmt.Sprintf("Unable to register manifest: %s", err)) } } +func unmarshalOCISchema(b []byte) (distribution.Manifest, v1.Descriptor, error) { + if err := validateManifest(b); err != nil { + return nil, v1.Descriptor{}, err + } + + m := &DeserializedManifest{} + if err := m.UnmarshalJSON(b); err != nil { + return nil, v1.Descriptor{}, err + } + + return m, v1.Descriptor{ + MediaType: v1.MediaTypeImageManifest, + Digest: digest.FromBytes(b), + Size: int64(len(b)), + Annotations: m.Annotations, + }, nil +} + // Manifest defines a ocischema manifest. type Manifest struct { - manifest.Versioned + specs.Versioned + + // MediaType is the media type of this schema. + MediaType string `json:"mediaType,omitempty"` // Config references the image configuration as a blob. - Config distribution.Descriptor `json:"config"` + Config v1.Descriptor `json:"config"` // Layers lists descriptors for the layers referenced by the // configuration. - Layers []distribution.Descriptor `json:"layers"` + Layers []v1.Descriptor `json:"layers"` // Annotations contains arbitrary metadata for the image manifest. Annotations map[string]string `json:"annotations,omitempty"` } // References returns the descriptors of this manifests references. -func (m Manifest) References() []distribution.Descriptor { - references := make([]distribution.Descriptor, 0, 1+len(m.Layers)) +func (m Manifest) References() []v1.Descriptor { + references := make([]v1.Descriptor, 0, 1+len(m.Layers)) references = append(references, m.Config) references = append(references, m.Layers...) return references } // Target returns the target of this manifest. -func (m Manifest) Target() distribution.Descriptor { +func (m Manifest) Target() v1.Descriptor { return m.Config } @@ -120,20 +133,16 @@ func (m *DeserializedManifest) MarshalJSON() ([]byte, error) { // Payload returns the raw content of the manifest. The contents can be used to // calculate the content identifier. 
-func (m DeserializedManifest) Payload() (string, []byte, error) { +func (m *DeserializedManifest) Payload() (string, []byte, error) { return v1.MediaTypeImageManifest, m.canonical, nil } -// unknownDocument represents a manifest, manifest list, or index that has not -// yet been validated -type unknownDocument struct { - Manifests interface{} `json:"manifests,omitempty"` -} - // validateManifest returns an error if the byte slice is invalid JSON or if it // contains fields that belong to a index func validateManifest(b []byte) error { - var doc unknownDocument + var doc struct { + Manifests interface{} `json:"manifests,omitempty"` + } if err := json.Unmarshal(b, &doc); err != nil { return err } diff --git a/vendor/github.com/distribution/distribution/v3/manifest/schema1/reference_builder.go b/vendor/github.com/distribution/distribution/v3/manifest/schema1/reference_builder.go deleted file mode 100644 index 13713b5270..0000000000 --- a/vendor/github.com/distribution/distribution/v3/manifest/schema1/reference_builder.go +++ /dev/null @@ -1,112 +0,0 @@ -package schema1 - -import ( - "context" - "errors" - "fmt" - - "github.com/distribution/distribution/v3" - "github.com/distribution/distribution/v3/manifest" - "github.com/distribution/distribution/v3/reference" - "github.com/docker/libtrust" - "github.com/opencontainers/go-digest" -) - -// referenceManifestBuilder is a type for constructing manifests from schema1 -// dependencies. -type referenceManifestBuilder struct { - Manifest - pk libtrust.PrivateKey -} - -// NewReferenceManifestBuilder is used to build new manifests for the current -// schema version using schema1 dependencies. -// -// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015. -// Use Docker Image Manifest v2, Schema 2, or the OCI Image Specification. -func NewReferenceManifestBuilder(pk libtrust.PrivateKey, ref reference.Named, architecture string) distribution.ManifestBuilder { - tag := "" - if tagged, isTagged := ref.(reference.Tagged); isTagged { - tag = tagged.Tag() - } - - return &referenceManifestBuilder{ - Manifest: Manifest{ - Versioned: manifest.Versioned{ - SchemaVersion: 1, - }, - Name: ref.Name(), - Tag: tag, - Architecture: architecture, - }, - pk: pk, - } -} - -func (mb *referenceManifestBuilder) Build(ctx context.Context) (distribution.Manifest, error) { - m := mb.Manifest - if len(m.FSLayers) == 0 { - return nil, errors.New("cannot build manifest with zero layers or history") - } - - m.FSLayers = make([]FSLayer, len(mb.Manifest.FSLayers)) - m.History = make([]History, len(mb.Manifest.History)) - copy(m.FSLayers, mb.Manifest.FSLayers) - copy(m.History, mb.Manifest.History) - - return Sign(&m, mb.pk) -} - -// AppendReference adds a reference to the current ManifestBuilder. -// -// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015. -// Use Docker Image Manifest v2, Schema 2, or the OCI Image Specification. -func (mb *referenceManifestBuilder) AppendReference(d distribution.Describable) error { - r, ok := d.(Reference) - if !ok { - return fmt.Errorf("unable to add non-reference type to v1 builder") - } - - // Entries need to be prepended - mb.Manifest.FSLayers = append([]FSLayer{{BlobSum: r.Digest}}, mb.Manifest.FSLayers...) - mb.Manifest.History = append([]History{r.History}, mb.Manifest.History...) - return nil -} - -// References returns the current references added to this builder. -// -// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015. 
-// Use Docker Image Manifest v2, Schema 2, or the OCI Image Specification. -func (mb *referenceManifestBuilder) References() []distribution.Descriptor { - refs := make([]distribution.Descriptor, len(mb.Manifest.FSLayers)) - for i := range mb.Manifest.FSLayers { - layerDigest := mb.Manifest.FSLayers[i].BlobSum - history := mb.Manifest.History[i] - ref := Reference{layerDigest, 0, history} - refs[i] = ref.Descriptor() - } - return refs -} - -// Reference describes a Manifest v2, schema version 1 dependency. -// An FSLayer associated with a history entry. -// -// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015. -// Use Docker Image Manifest v2, Schema 2, or the OCI Image Specification. -type Reference struct { - Digest digest.Digest - Size int64 // if we know it, set it for the descriptor. - History History -} - -// Descriptor describes a reference. -// -// Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015. -// Use Docker Image Manifest v2, Schema 2, or the OCI Image Specification. -func (r Reference) Descriptor() distribution.Descriptor { - return distribution.Descriptor{ - MediaType: MediaTypeManifestLayer, - Digest: r.Digest, - Size: r.Size, - } -} diff --git a/vendor/github.com/distribution/distribution/v3/manifest/schema2/builder.go b/vendor/github.com/distribution/distribution/v3/manifest/schema2/builder.go index 585741d03c..7db3143112 100644 --- a/vendor/github.com/distribution/distribution/v3/manifest/schema2/builder.go +++ b/vendor/github.com/distribution/distribution/v3/manifest/schema2/builder.go @@ -4,26 +4,28 @@ import ( "context" "github.com/distribution/distribution/v3" + "github.com/opencontainers/image-spec/specs-go" + v1 "github.com/opencontainers/image-spec/specs-go/v1" ) -// builder is a type for constructing manifests. -type builder struct { +// Builder is a type for constructing manifests. +type Builder struct { // configDescriptor is used to describe configuration - configDescriptor distribution.Descriptor + configDescriptor v1.Descriptor // configJSON references configJSON []byte // dependencies is a list of descriptors that gets built by successive // calls to AppendReference. In case of image configuration these are layers. - dependencies []distribution.Descriptor + dependencies []v1.Descriptor } // NewManifestBuilder is used to build new manifests for the current schema // version. It takes a BlobService so it can publish the configuration blob // as part of the Build process. -func NewManifestBuilder(configDescriptor distribution.Descriptor, configJSON []byte) distribution.ManifestBuilder { - mb := &builder{ +func NewManifestBuilder(configDescriptor v1.Descriptor, configJSON []byte) *Builder { + mb := &Builder{ configDescriptor: configDescriptor, configJSON: make([]byte, len(configJSON)), } @@ -33,10 +35,11 @@ func NewManifestBuilder(configDescriptor distribution.Descriptor, configJSON []b } // Build produces a final manifest from the given references. 
-func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) { +func (mb *Builder) Build(ctx context.Context) (distribution.Manifest, error) { m := Manifest{ - Versioned: SchemaVersion, - Layers: make([]distribution.Descriptor, len(mb.dependencies)), + Versioned: specs.Versioned{SchemaVersion: defaultSchemaVersion}, + MediaType: defaultMediaType, + Layers: make([]v1.Descriptor, len(mb.dependencies)), } copy(m.Layers, mb.dependencies) @@ -46,12 +49,12 @@ func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) { } // AppendReference adds a reference to the current ManifestBuilder. -func (mb *builder) AppendReference(d distribution.Describable) error { - mb.dependencies = append(mb.dependencies, d.Descriptor()) +func (mb *Builder) AppendReference(ref v1.Descriptor) error { + mb.dependencies = append(mb.dependencies, ref) return nil } // References returns the current references added to this builder. -func (mb *builder) References() []distribution.Descriptor { +func (mb *Builder) References() []v1.Descriptor { return mb.dependencies } diff --git a/vendor/github.com/distribution/distribution/v3/manifest/schema2/manifest.go b/vendor/github.com/distribution/distribution/v3/manifest/schema2/manifest.go index 89bf63c966..6df2fa552b 100644 --- a/vendor/github.com/distribution/distribution/v3/manifest/schema2/manifest.go +++ b/vendor/github.com/distribution/distribution/v3/manifest/schema2/manifest.go @@ -8,6 +8,8 @@ import ( "github.com/distribution/distribution/v3" "github.com/distribution/distribution/v3/manifest" "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go" + v1 "github.com/opencontainers/image-spec/specs-go/v1" ) const ( @@ -33,52 +35,67 @@ const ( MediaTypeUncompressedLayer = "application/vnd.docker.image.rootfs.diff.tar" ) +const ( + defaultSchemaVersion = 2 + defaultMediaType = MediaTypeManifest +) + // SchemaVersion provides a pre-initialized version structure for this // packages version of the manifest. +// +// Deprecated: use [specs.Versioned] and set MediaType on the manifest +// to [MediaTypeManifest]. +// +//nolint:staticcheck // ignore SA1019: manifest.Versioned is deprecated: var SchemaVersion = manifest.Versioned{ - SchemaVersion: 2, - MediaType: MediaTypeManifest, + SchemaVersion: defaultSchemaVersion, + MediaType: defaultMediaType, } func init() { - schema2Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { - m := new(DeserializedManifest) - err := m.UnmarshalJSON(b) - if err != nil { - return nil, distribution.Descriptor{}, err - } - - dgst := digest.FromBytes(b) - return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifest}, err - } - err := distribution.RegisterManifestSchema(MediaTypeManifest, schema2Func) - if err != nil { + if err := distribution.RegisterManifestSchema(defaultMediaType, unmarshalSchema2); err != nil { panic(fmt.Sprintf("Unable to register manifest: %s", err)) } } +func unmarshalSchema2(b []byte) (distribution.Manifest, v1.Descriptor, error) { + m := &DeserializedManifest{} + if err := m.UnmarshalJSON(b); err != nil { + return nil, v1.Descriptor{}, err + } + + return m, v1.Descriptor{ + Digest: digest.FromBytes(b), + Size: int64(len(b)), + MediaType: defaultMediaType, + }, nil +} + // Manifest defines a schema2 manifest. type Manifest struct { - manifest.Versioned + specs.Versioned + + // MediaType is the media type of this schema. 
+ MediaType string `json:"mediaType,omitempty"` // Config references the image configuration as a blob. - Config distribution.Descriptor `json:"config"` + Config v1.Descriptor `json:"config"` // Layers lists descriptors for the layers referenced by the // configuration. - Layers []distribution.Descriptor `json:"layers"` + Layers []v1.Descriptor `json:"layers"` } // References returns the descriptors of this manifests references. -func (m Manifest) References() []distribution.Descriptor { - references := make([]distribution.Descriptor, 0, 1+len(m.Layers)) +func (m Manifest) References() []v1.Descriptor { + references := make([]v1.Descriptor, 0, 1+len(m.Layers)) references = append(references, m.Config) references = append(references, m.Layers...) return references } // Target returns the target of this manifest. -func (m Manifest) Target() distribution.Descriptor { +func (m Manifest) Target() v1.Descriptor { return m.Config } @@ -114,9 +131,8 @@ func (m *DeserializedManifest) UnmarshalJSON(b []byte) error { return err } - if mfst.MediaType != MediaTypeManifest { - return fmt.Errorf("mediaType in manifest should be '%s' not '%s'", - MediaTypeManifest, mfst.MediaType) + if mfst.MediaType != defaultMediaType { + return fmt.Errorf("mediaType in manifest should be '%s' not '%s'", defaultMediaType, mfst.MediaType) } m.Manifest = mfst diff --git a/vendor/github.com/distribution/distribution/v3/manifest/versioned.go b/vendor/github.com/distribution/distribution/v3/manifest/versioned.go index caa6b14e88..ec8ebc557c 100644 --- a/vendor/github.com/distribution/distribution/v3/manifest/versioned.go +++ b/vendor/github.com/distribution/distribution/v3/manifest/versioned.go @@ -3,6 +3,8 @@ package manifest // Versioned provides a struct with the manifest schemaVersion and mediaType. // Incoming content with unknown schema version can be decoded against this // struct to check the version. +// +// Deprecated: use [specs.Versioned] and set MediaType on the Manifest itself. type Versioned struct { // SchemaVersion is the image manifest schema that this image follows SchemaVersion int `json:"schemaVersion"` diff --git a/vendor/github.com/distribution/distribution/v3/manifests.go b/vendor/github.com/distribution/distribution/v3/manifests.go index 8f84a220a9..290558dabe 100644 --- a/vendor/github.com/distribution/distribution/v3/manifests.go +++ b/vendor/github.com/distribution/distribution/v3/manifests.go @@ -6,6 +6,7 @@ import ( "mime" "github.com/opencontainers/go-digest" + v1 "github.com/opencontainers/image-spec/specs-go/v1" ) // Manifest represents a registry object specifying a set of @@ -13,41 +14,20 @@ import ( type Manifest interface { // References returns a list of objects which make up this manifest. // A reference is anything which can be represented by a - // distribution.Descriptor. These can consist of layers, resources or other + // Descriptor. These can consist of layers, resources or other // manifests. // // While no particular order is required, implementations should return // them from highest to lowest priority. For example, one might want to // return the base layer before the top layer. - References() []Descriptor + References() []v1.Descriptor // Payload provides the serialized format of the manifest, in addition to // the media type. Payload() (mediaType string, payload []byte, err error) } -// ManifestBuilder creates a manifest allowing one to include dependencies. -// Instances can be obtained from a version-specific manifest package. 
Manifest -// specific data is passed into the function which creates the builder. -type ManifestBuilder interface { - // Build creates the manifest from his builder. - Build(ctx context.Context) (Manifest, error) - - // References returns a list of objects which have been added to this - // builder. The dependencies are returned in the order they were added, - // which should be from base to head. - References() []Descriptor - - // AppendReference includes the given object in the manifest after any - // existing dependencies. If the add fails, such as when adding an - // unsupported dependency, an error may be returned. - // - // The destination of the reference is dependent on the manifest type and - // the dependency type. - AppendReference(dependency Describable) error -} - -// ManifestService describes operations on image manifests. +// ManifestService describes operations on manifests. type ManifestService interface { // Exists returns true if the manifest exists. Exists(ctx context.Context, dgst digest.Digest) (bool, error) @@ -69,9 +49,13 @@ type ManifestEnumerator interface { Enumerate(ctx context.Context, ingester func(digest.Digest) error) error } -// Describable is an interface for descriptors +// Describable is an interface for descriptors. +// +// Implementations of Describable are generally objects which can be +// described, not simply descriptors. type Describable interface { - Descriptor() Descriptor + // Descriptor returns the descriptor. + Descriptor() v1.Descriptor } // ManifestMediaTypes returns the supported media types for manifests. @@ -85,13 +69,13 @@ func ManifestMediaTypes() (mediaTypes []string) { } // UnmarshalFunc implements manifest unmarshalling a given MediaType -type UnmarshalFunc func([]byte) (Manifest, Descriptor, error) +type UnmarshalFunc func([]byte) (Manifest, v1.Descriptor, error) var mappings = make(map[string]UnmarshalFunc) // UnmarshalManifest looks up manifest unmarshal functions based on // MediaType -func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) { +func UnmarshalManifest(ctHeader string, p []byte) (Manifest, v1.Descriptor, error) { // Need to look up by the actual media type, not the raw contents of // the header. Strip semicolons and anything following them. 
var mediaType string @@ -99,7 +83,7 @@ func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) var err error mediaType, _, err = mime.ParseMediaType(ctHeader) if err != nil { - return nil, Descriptor{}, err + return nil, v1.Descriptor{}, err } } @@ -107,7 +91,7 @@ func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) if !ok { unmarshalFunc, ok = mappings[""] if !ok { - return nil, Descriptor{}, fmt.Errorf("unsupported manifest media type and no default available: %s", mediaType) + return nil, v1.Descriptor{}, fmt.Errorf("unsupported manifest media type and no default available: %s", mediaType) } } diff --git a/vendor/github.com/distribution/distribution/v3/metrics/prometheus.go b/vendor/github.com/distribution/distribution/v3/metrics/prometheus.go index 91b32b23d9..d84781190d 100644 --- a/vendor/github.com/distribution/distribution/v3/metrics/prometheus.go +++ b/vendor/github.com/distribution/distribution/v3/metrics/prometheus.go @@ -13,4 +13,7 @@ var ( // NotificationsNamespace is the prometheus namespace of notification related metrics NotificationsNamespace = metrics.NewNamespace(NamespacePrefix, "notifications", nil) + + // ProxyNamespace is the prometheus namespace of proxy related metrics + ProxyNamespace = metrics.NewNamespace(NamespacePrefix, "proxy", nil) ) diff --git a/vendor/github.com/distribution/distribution/v3/reference/helpers.go b/vendor/github.com/distribution/distribution/v3/reference/helpers.go deleted file mode 100644 index d10c7ef838..0000000000 --- a/vendor/github.com/distribution/distribution/v3/reference/helpers.go +++ /dev/null @@ -1,42 +0,0 @@ -package reference - -import "path" - -// IsNameOnly returns true if reference only contains a repo name. -func IsNameOnly(ref Named) bool { - if _, ok := ref.(NamedTagged); ok { - return false - } - if _, ok := ref.(Canonical); ok { - return false - } - return true -} - -// FamiliarName returns the familiar name string -// for the given named, familiarizing if needed. -func FamiliarName(ref Named) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().Name() - } - return ref.Name() -} - -// FamiliarString returns the familiar string representation -// for the given reference, familiarizing if needed. -func FamiliarString(ref Reference) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().String() - } - return ref.String() -} - -// FamiliarMatch reports whether ref matches the specified pattern. -// See [path.Match] for supported patterns. -func FamiliarMatch(pattern string, ref Reference) (bool, error) { - matched, err := path.Match(pattern, FamiliarString(ref)) - if namedRef, isNamed := ref.(Named); isNamed && !matched { - matched, _ = path.Match(pattern, FamiliarName(namedRef)) - } - return matched, err -} diff --git a/vendor/github.com/distribution/distribution/v3/reference/normalize.go b/vendor/github.com/distribution/distribution/v3/reference/normalize.go deleted file mode 100644 index f2ce13ada8..0000000000 --- a/vendor/github.com/distribution/distribution/v3/reference/normalize.go +++ /dev/null @@ -1,224 +0,0 @@ -package reference - -import ( - "fmt" - "strings" - - "github.com/opencontainers/go-digest" -) - -const ( - // legacyDefaultDomain is the legacy domain for Docker Hub (which was - // originally named "the Docker Index"). This domain is still used for - // authentication and image search, which were part of the "v1" Docker - // registry specification. 
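Every registered unmarshal function, and UnmarshalManifest itself, now returns an OCI image-spec v1.Descriptor rather than distribution.Descriptor. A hedged sketch of how a consumer decodes a fetched manifest under the new signature; the content type and payload here are placeholders, and the blank imports stand in for whichever schema packages a real caller needs:

```go
package main

import (
	"fmt"

	distribution "github.com/distribution/distribution/v3"
	// Blank imports run each package's init, which calls
	// distribution.RegisterManifestSchema for its media type.
	_ "github.com/distribution/distribution/v3/manifest/manifestlist"
	_ "github.com/distribution/distribution/v3/manifest/ocischema"
	_ "github.com/distribution/distribution/v3/manifest/schema2"
)

func decode(contentType string, payload []byte) error {
	// UnmarshalManifest dispatches on the Content-Type media type and now
	// returns a v1.Descriptor describing the payload.
	m, desc, err := distribution.UnmarshalManifest(contentType, payload)
	if err != nil {
		return err
	}
	fmt.Println(desc.MediaType, desc.Digest, desc.Size, len(m.References()))
	return nil
}

func main() {
	// Placeholder bytes; a real caller passes the body fetched from a registry.
	_ = decode("application/vnd.oci.image.manifest.v1+json", []byte(`{"schemaVersion":2}`))
}
```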
- // - // This domain will continue to be supported, but there are plans to consolidate - // legacy domains to new "canonical" domains. Once those domains are decided - // on, we must update the normalization functions, but preserve compatibility - // with existing installs, clients, and user configuration. - legacyDefaultDomain = "index.docker.io" - - // defaultDomain is the default domain used for images on Docker Hub. - // It is used to normalize "familiar" names to canonical names, for example, - // to convert "ubuntu" to "docker.io/library/ubuntu:latest". - // - // Note that actual domain of Docker Hub's registry is registry-1.docker.io. - // This domain will continue to be supported, but there are plans to consolidate - // legacy domains to new "canonical" domains. Once those domains are decided - // on, we must update the normalization functions, but preserve compatibility - // with existing installs, clients, and user configuration. - defaultDomain = "docker.io" - - // officialRepoPrefix is the namespace used for official images on Docker Hub. - // It is used to normalize "familiar" names to canonical names, for example, - // to convert "ubuntu" to "docker.io/library/ubuntu:latest". - officialRepoPrefix = "library/" - - // defaultTag is the default tag if no tag is provided. - defaultTag = "latest" -) - -// normalizedNamed represents a name which has been -// normalized and has a familiar form. A familiar name -// is what is used in Docker UI. An example normalized -// name is "docker.io/library/ubuntu" and corresponding -// familiar name of "ubuntu". -type normalizedNamed interface { - Named - Familiar() Named -} - -// ParseNormalizedNamed parses a string into a named reference -// transforming a familiar name from Docker UI to a fully -// qualified reference. If the value may be an identifier -// use ParseAnyReference. -func ParseNormalizedNamed(s string) (Named, error) { - if ok := anchoredIdentifierRegexp.MatchString(s); ok { - return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s) - } - domain, remainder := splitDockerDomain(s) - var remote string - if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 { - remote = remainder[:tagSep] - } else { - remote = remainder - } - if strings.ToLower(remote) != remote { - return nil, fmt.Errorf("invalid reference format: repository name (%s) must be lowercase", remote) - } - - ref, err := Parse(domain + "/" + remainder) - if err != nil { - return nil, err - } - named, isNamed := ref.(Named) - if !isNamed { - return nil, fmt.Errorf("reference %s has no name", ref.String()) - } - return named, nil -} - -// namedTaggedDigested is a reference that has both a tag and a digest. -type namedTaggedDigested interface { - NamedTagged - Digested -} - -// ParseDockerRef normalizes the image reference following the docker convention, -// which allows for references to contain both a tag and a digest. It returns a -// reference that is either tagged or digested. For references containing both -// a tag and a digest, it returns a digested reference. 
For example, the following -// reference: -// -// docker.io/library/busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa -// -// Is returned as a digested reference (with the ":latest" tag removed): -// -// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa -// -// References that are already "tagged" or "digested" are returned unmodified: -// -// // Already a digested reference -// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa -// -// // Already a named reference -// docker.io/library/busybox:latest -func ParseDockerRef(ref string) (Named, error) { - named, err := ParseNormalizedNamed(ref) - if err != nil { - return nil, err - } - if canonical, ok := named.(namedTaggedDigested); ok { - // The reference is both tagged and digested; only return digested. - newNamed, err := WithName(canonical.Name()) - if err != nil { - return nil, err - } - return WithDigest(newNamed, canonical.Digest()) - } - return TagNameOnly(named), nil -} - -// splitDockerDomain splits a repository name to domain and remote-name. -// If no valid domain is found, the default domain is used. Repository name -// needs to be already validated before. -func splitDockerDomain(name string) (domain, remainder string) { - i := strings.IndexRune(name, '/') - if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != localhost && strings.ToLower(name[:i]) == name[:i]) { - domain, remainder = defaultDomain, name - } else { - domain, remainder = name[:i], name[i+1:] - } - if domain == legacyDefaultDomain { - domain = defaultDomain - } - if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { - remainder = officialRepoPrefix + remainder - } - return -} - -// familiarizeName returns a shortened version of the name familiar -// to to the Docker UI. Familiar names have the default domain -// "docker.io" and "library/" repository prefix removed. -// For example, "docker.io/library/redis" will have the familiar -// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". -// Returns a familiarized named only reference. -func familiarizeName(named namedRepository) repository { - repo := repository{ - domain: named.Domain(), - path: named.Path(), - } - - if repo.domain == defaultDomain { - repo.domain = "" - // Handle official repositories which have the pattern "library/" - if strings.HasPrefix(repo.path, officialRepoPrefix) { - // TODO(thaJeztah): this check may be too strict, as it assumes the - // "library/" namespace does not have nested namespaces. While this - // is true (currently), technically it would be possible for Docker - // Hub to use those (e.g. "library/distros/ubuntu:latest"). - // See https://github.com/distribution/distribution/pull/3769#issuecomment-1302031785. 
- if remainder := strings.TrimPrefix(repo.path, officialRepoPrefix); !strings.ContainsRune(remainder, '/') { - repo.path = remainder - } - } - } - return repo -} - -func (r reference) Familiar() Named { - return reference{ - namedRepository: familiarizeName(r.namedRepository), - tag: r.tag, - digest: r.digest, - } -} - -func (r repository) Familiar() Named { - return familiarizeName(r) -} - -func (t taggedReference) Familiar() Named { - return taggedReference{ - namedRepository: familiarizeName(t.namedRepository), - tag: t.tag, - } -} - -func (c canonicalReference) Familiar() Named { - return canonicalReference{ - namedRepository: familiarizeName(c.namedRepository), - digest: c.digest, - } -} - -// TagNameOnly adds the default tag "latest" to a reference if it only has -// a repo name. -func TagNameOnly(ref Named) Named { - if IsNameOnly(ref) { - namedTagged, err := WithTag(ref, defaultTag) - if err != nil { - // Default tag must be valid, to create a NamedTagged - // type with non-validated input the WithTag function - // should be used instead - panic(err) - } - return namedTagged - } - return ref -} - -// ParseAnyReference parses a reference string as a possible identifier, -// full digest, or familiar name. -func ParseAnyReference(ref string) (Reference, error) { - if ok := anchoredIdentifierRegexp.MatchString(ref); ok { - return digestReference("sha256:" + ref), nil - } - if dgst, err := digest.Parse(ref); err == nil { - return digestReference(dgst), nil - } - - return ParseNormalizedNamed(ref) -} diff --git a/vendor/github.com/distribution/distribution/v3/reference/reference.go b/vendor/github.com/distribution/distribution/v3/reference/reference.go deleted file mode 100644 index e98c44daa2..0000000000 --- a/vendor/github.com/distribution/distribution/v3/reference/reference.go +++ /dev/null @@ -1,436 +0,0 @@ -// Package reference provides a general type to represent any way of referencing images within the registry. -// Its main purpose is to abstract tags and digests (content-addressable hash). -// -// Grammar -// -// reference := name [ ":" tag ] [ "@" digest ] -// name := [domain '/'] remote-name -// domain := host [':' port-number] -// host := domain-name | IPv4address | \[ IPv6address \] ; rfc3986 appendix-A -// domain-name := domain-component ['.' domain-component]* -// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ -// port-number := /[0-9]+/ -// path-component := alpha-numeric [separator alpha-numeric]* -// path (or "remote-name") := path-component ['/' path-component]* -// alpha-numeric := /[a-z0-9]+/ -// separator := /[_.]|__|[-]*/ -// -// tag := /[\w][\w.-]{0,127}/ -// -// digest := digest-algorithm ":" digest-hex -// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* -// digest-algorithm-separator := /[+.-_]/ -// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ -// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value -// -// identifier := /[a-f0-9]{64}/ -package reference - -import ( - "errors" - "fmt" - "strings" - - "github.com/opencontainers/go-digest" -) - -const ( - // NameTotalLengthMax is the maximum total number of characters in a repository name. - NameTotalLengthMax = 255 -) - -var ( - // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. - ErrReferenceInvalidFormat = errors.New("invalid reference format") - - // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. 
- ErrTagInvalidFormat = errors.New("invalid tag format") - - // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag. - ErrDigestInvalidFormat = errors.New("invalid digest format") - - // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters. - ErrNameContainsUppercase = errors.New("repository name must be lowercase") - - // ErrNameEmpty is returned for empty, invalid repository names. - ErrNameEmpty = errors.New("repository name must have at least one component") - - // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. - ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) - - // ErrNameNotCanonical is returned when a name is not canonical. - ErrNameNotCanonical = errors.New("repository name must be canonical") -) - -// Reference is an opaque object reference identifier that may include -// modifiers such as a hostname, name, tag, and digest. -type Reference interface { - // String returns the full reference - String() string -} - -// Field provides a wrapper type for resolving correct reference types when -// working with encoding. -type Field struct { - reference Reference -} - -// AsField wraps a reference in a Field for encoding. -func AsField(reference Reference) Field { - return Field{reference} -} - -// Reference unwraps the reference type from the field to -// return the Reference object. This object should be -// of the appropriate type to further check for different -// reference types. -func (f Field) Reference() Reference { - return f.reference -} - -// MarshalText serializes the field to byte text which -// is the string of the reference. -func (f Field) MarshalText() (p []byte, err error) { - return []byte(f.reference.String()), nil -} - -// UnmarshalText parses text bytes by invoking the -// reference parser to ensure the appropriately -// typed reference object is wrapped by field. -func (f *Field) UnmarshalText(p []byte) error { - r, err := Parse(string(p)) - if err != nil { - return err - } - - f.reference = r - return nil -} - -// Named is an object with a full name -type Named interface { - Reference - Name() string -} - -// Tagged is an object which has a tag -type Tagged interface { - Reference - Tag() string -} - -// NamedTagged is an object including a name and tag. -type NamedTagged interface { - Named - Tag() string -} - -// Digested is an object which has a digest -// in which it can be referenced by -type Digested interface { - Reference - Digest() digest.Digest -} - -// Canonical reference is an object with a fully unique -// name including a name with domain and digest -type Canonical interface { - Named - Digest() digest.Digest -} - -// namedRepository is a reference to a repository with a name. -// A namedRepository has both domain and path components. -type namedRepository interface { - Named - Domain() string - Path() string -} - -// Domain returns the domain part of the [Named] reference. -func Domain(named Named) string { - if r, ok := named.(namedRepository); ok { - return r.Domain() - } - domain, _ := splitDomain(named.Name()) - return domain -} - -// Path returns the name without the domain part of the [Named] reference. 
-func Path(named Named) (name string) { - if r, ok := named.(namedRepository); ok { - return r.Path() - } - _, path := splitDomain(named.Name()) - return path -} - -func splitDomain(name string) (string, string) { - match := anchoredNameRegexp.FindStringSubmatch(name) - if len(match) != 3 { - return "", name - } - return match[1], match[2] -} - -// SplitHostname splits a named reference into a -// hostname and name string. If no valid hostname is -// found, the hostname is empty and the full value -// is returned as name -// -// Deprecated: Use [Domain] or [Path]. -func SplitHostname(named Named) (string, string) { - if r, ok := named.(namedRepository); ok { - return r.Domain(), r.Path() - } - return splitDomain(named.Name()) -} - -// Parse parses s and returns a syntactically valid Reference. -// If an error was encountered it is returned, along with a nil Reference. -func Parse(s string) (Reference, error) { - matches := ReferenceRegexp.FindStringSubmatch(s) - if matches == nil { - if s == "" { - return nil, ErrNameEmpty - } - if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { - return nil, ErrNameContainsUppercase - } - return nil, ErrReferenceInvalidFormat - } - - if len(matches[1]) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - - var repo repository - - nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) - if len(nameMatch) == 3 { - repo.domain = nameMatch[1] - repo.path = nameMatch[2] - } else { - repo.domain = "" - repo.path = matches[1] - } - - ref := reference{ - namedRepository: repo, - tag: matches[2], - } - if matches[3] != "" { - var err error - ref.digest, err = digest.Parse(matches[3]) - if err != nil { - return nil, err - } - } - - r := getBestReferenceType(ref) - if r == nil { - return nil, ErrNameEmpty - } - - return r, nil -} - -// ParseNamed parses s and returns a syntactically valid reference implementing -// the Named interface. The reference must have a name and be in the canonical -// form, otherwise an error is returned. -// If an error was encountered it is returned, along with a nil Reference. -func ParseNamed(s string) (Named, error) { - named, err := ParseNormalizedNamed(s) - if err != nil { - return nil, err - } - if named.String() != s { - return nil, ErrNameNotCanonical - } - return named, nil -} - -// WithName returns a named object representing the given string. If the input -// is invalid ErrReferenceInvalidFormat will be returned. -func WithName(name string) (Named, error) { - if len(name) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - - match := anchoredNameRegexp.FindStringSubmatch(name) - if match == nil || len(match) != 3 { - return nil, ErrReferenceInvalidFormat - } - return repository{ - domain: match[1], - path: match[2], - }, nil -} - -// WithTag combines the name from "name" and the tag from "tag" to form a -// reference incorporating both the name and the tag. 
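Domain and Path (and the WithTag/WithDigest/TrimNamed helpers that follow) are likewise available from the standalone module, with SplitHostname kept only as a deprecated shim. A small usage sketch, assuming the standalone module mirrors the deleted API:

```go
package main

import (
	"fmt"

	"github.com/distribution/reference"
	"github.com/opencontainers/go-digest"
)

func main() {
	named, _ := reference.ParseNormalizedNamed("quay.io/myorg/myimage")

	// Domain and Path replace the deprecated SplitHostname helper.
	fmt.Println(reference.Domain(named)) // quay.io
	fmt.Println(reference.Path(named))   // myorg/myimage

	// WithTag / WithDigest build tagged and canonical references;
	// TrimNamed strips them off again.
	tagged, _ := reference.WithTag(named, "v1")
	dgst := digest.FromString("example")
	canonical, _ := reference.WithDigest(named, dgst)
	fmt.Println(tagged.String())                       // quay.io/myorg/myimage:v1
	fmt.Println(canonical.String())                    // quay.io/myorg/myimage@sha256:...
	fmt.Println(reference.TrimNamed(canonical).Name()) // quay.io/myorg/myimage

	// ParseNamed only accepts the canonical, fully qualified form.
	if _, err := reference.ParseNamed("myimage:v1"); err != nil {
		fmt.Println(err) // repository name must be canonical
	}
}
```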
-func WithTag(name Named, tag string) (NamedTagged, error) { - if !anchoredTagRegexp.MatchString(tag) { - return nil, ErrTagInvalidFormat - } - var repo repository - if r, ok := name.(namedRepository); ok { - repo.domain = r.Domain() - repo.path = r.Path() - } else { - repo.path = name.Name() - } - if canonical, ok := name.(Canonical); ok { - return reference{ - namedRepository: repo, - tag: tag, - digest: canonical.Digest(), - }, nil - } - return taggedReference{ - namedRepository: repo, - tag: tag, - }, nil -} - -// WithDigest combines the name from "name" and the digest from "digest" to form -// a reference incorporating both the name and the digest. -func WithDigest(name Named, digest digest.Digest) (Canonical, error) { - if !anchoredDigestRegexp.MatchString(digest.String()) { - return nil, ErrDigestInvalidFormat - } - var repo repository - if r, ok := name.(namedRepository); ok { - repo.domain = r.Domain() - repo.path = r.Path() - } else { - repo.path = name.Name() - } - if tagged, ok := name.(Tagged); ok { - return reference{ - namedRepository: repo, - tag: tagged.Tag(), - digest: digest, - }, nil - } - return canonicalReference{ - namedRepository: repo, - digest: digest, - }, nil -} - -// TrimNamed removes any tag or digest from the named reference. -func TrimNamed(ref Named) Named { - repo := repository{} - if r, ok := ref.(namedRepository); ok { - repo.domain, repo.path = r.Domain(), r.Path() - } else { - repo.domain, repo.path = splitDomain(ref.Name()) - } - return repo -} - -func getBestReferenceType(ref reference) Reference { - if ref.Name() == "" { - // Allow digest only references - if ref.digest != "" { - return digestReference(ref.digest) - } - return nil - } - if ref.tag == "" { - if ref.digest != "" { - return canonicalReference{ - namedRepository: ref.namedRepository, - digest: ref.digest, - } - } - return ref.namedRepository - } - if ref.digest == "" { - return taggedReference{ - namedRepository: ref.namedRepository, - tag: ref.tag, - } - } - - return ref -} - -type reference struct { - namedRepository - tag string - digest digest.Digest -} - -func (r reference) String() string { - return r.Name() + ":" + r.tag + "@" + r.digest.String() -} - -func (r reference) Tag() string { - return r.tag -} - -func (r reference) Digest() digest.Digest { - return r.digest -} - -type repository struct { - domain string - path string -} - -func (r repository) String() string { - return r.Name() -} - -func (r repository) Name() string { - if r.domain == "" { - return r.path - } - return r.domain + "/" + r.path -} - -func (r repository) Domain() string { - return r.domain -} - -func (r repository) Path() string { - return r.path -} - -type digestReference digest.Digest - -func (d digestReference) String() string { - return digest.Digest(d).String() -} - -func (d digestReference) Digest() digest.Digest { - return digest.Digest(d) -} - -type taggedReference struct { - namedRepository - tag string -} - -func (t taggedReference) String() string { - return t.Name() + ":" + t.tag -} - -func (t taggedReference) Tag() string { - return t.tag -} - -type canonicalReference struct { - namedRepository - digest digest.Digest -} - -func (c canonicalReference) String() string { - return c.Name() + "@" + c.digest.String() -} - -func (c canonicalReference) Digest() digest.Digest { - return c.digest -} diff --git a/vendor/github.com/distribution/distribution/v3/reference/regexp.go b/vendor/github.com/distribution/distribution/v3/reference/regexp.go deleted file mode 100644 index 65bc49d79b..0000000000 --- 
a/vendor/github.com/distribution/distribution/v3/reference/regexp.go +++ /dev/null @@ -1,163 +0,0 @@ -package reference - -import ( - "regexp" - "strings" -) - -// DigestRegexp matches well-formed digests, including algorithm (e.g. "sha256:"). -var DigestRegexp = regexp.MustCompile(digestPat) - -// DomainRegexp matches hostname or IP-addresses, optionally including a port -// number. It defines the structure of potential domain components that may be -// part of image names. This is purposely a subset of what is allowed by DNS to -// ensure backwards compatibility with Docker image names. It may be a subset of -// DNS domain name, an IPv4 address in decimal format, or an IPv6 address between -// square brackets (excluding zone identifiers as defined by [RFC 6874] or special -// addresses such as IPv4-Mapped). -// -// [RFC 6874]: https://www.rfc-editor.org/rfc/rfc6874. -var DomainRegexp = regexp.MustCompile(domainAndPort) - -// IdentifierRegexp is the format for string identifier used as a -// content addressable identifier using sha256. These identifiers -// are like digests without the algorithm, since sha256 is used. -var IdentifierRegexp = regexp.MustCompile(identifier) - -// NameRegexp is the format for the name component of references, including -// an optional domain and port, but without tag or digest suffix. -var NameRegexp = regexp.MustCompile(namePat) - -// ReferenceRegexp is the full supported format of a reference. The regexp -// is anchored and has capturing groups for name, tag, and digest -// components. -var ReferenceRegexp = regexp.MustCompile(referencePat) - -// TagRegexp matches valid tag names. From [docker/docker:graph/tags.go]. -// -// [docker/docker:graph/tags.go]: https://github.com/moby/moby/blob/v1.6.0/graph/tags.go#L26-L28 -var TagRegexp = regexp.MustCompile(tag) - -const ( - // alphanumeric defines the alphanumeric atom, typically a - // component of names. This only allows lower case characters and digits. - alphanumeric = `[a-z0-9]+` - - // separator defines the separators allowed to be embedded in name - // components. This allows one period, one or two underscore and multiple - // dashes. Repeated dashes and underscores are intentionally treated - // differently. In order to support valid hostnames as name components, - // supporting repeated dash was added. Additionally double underscore is - // now allowed as a separator to loosen the restriction for previously - // supported names. - separator = `(?:[._]|__|[-]+)` - - // localhost is treated as a special value for domain-name. Any other - // domain-name without a "." or a ":port" are considered a path component. - localhost = `localhost` - - // domainNameComponent restricts the registry domain component of a - // repository name to start with a component as defined by DomainRegexp. - domainNameComponent = `(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])` - - // optionalPort matches an optional port-number including the port separator - // (e.g. ":80"). - optionalPort = `(?::[0-9]+)?` - - // tag matches valid tag names. From docker/docker:graph/tags.go. - tag = `[\w][\w.-]{0,127}` - - // digestPat matches well-formed digests, including algorithm (e.g. "sha256:"). - // - // TODO(thaJeztah): this should follow the same rules as https://pkg.go.dev/github.com/opencontainers/go-digest@v1.0.0#DigestRegexp - // so that go-digest defines the canonical format. Note that the go-digest is - // more relaxed: - // - it allows multiple algorithms (e.g. 
"sha256+b64:") to allow - // future expansion of supported algorithms. - // - it allows the "" value to use urlsafe base64 encoding as defined - // in [rfc4648, section 5]. - // - // [rfc4648, section 5]: https://www.rfc-editor.org/rfc/rfc4648#section-5. - digestPat = `[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}` - - // identifier is the format for a content addressable identifier using sha256. - // These identifiers are like digests without the algorithm, since sha256 is used. - identifier = `([a-f0-9]{64})` - - // ipv6address are enclosed between square brackets and may be represented - // in many ways, see rfc5952. Only IPv6 in compressed or uncompressed format - // are allowed, IPv6 zone identifiers (rfc6874) or Special addresses such as - // IPv4-Mapped are deliberately excluded. - ipv6address = `\[(?:[a-fA-F0-9:]+)\]` -) - -var ( - // domainName defines the structure of potential domain components - // that may be part of image names. This is purposely a subset of what is - // allowed by DNS to ensure backwards compatibility with Docker image - // names. This includes IPv4 addresses on decimal format. - domainName = domainNameComponent + anyTimes(`\.`+domainNameComponent) - - // host defines the structure of potential domains based on the URI - // Host subcomponent on rfc3986. It may be a subset of DNS domain name, - // or an IPv4 address in decimal format, or an IPv6 address between square - // brackets (excluding zone identifiers as defined by rfc6874 or special - // addresses such as IPv4-Mapped). - host = `(?:` + domainName + `|` + ipv6address + `)` - - // allowed by the URI Host subcomponent on rfc3986 to ensure backwards - // compatibility with Docker image names. - domainAndPort = host + optionalPort - - // anchoredTagRegexp matches valid tag names, anchored at the start and - // end of the matched string. - anchoredTagRegexp = regexp.MustCompile(anchored(tag)) - - // anchoredDigestRegexp matches valid digests, anchored at the start and - // end of the matched string. - anchoredDigestRegexp = regexp.MustCompile(anchored(digestPat)) - - // pathComponent restricts path-components to start with an alphanumeric - // character, with following parts able to be separated by a separator - // (one period, one or two underscore and multiple dashes). - pathComponent = alphanumeric + anyTimes(separator+alphanumeric) - - // remoteName matches the remote-name of a repository. It consists of one - // or more forward slash (/) delimited path-components: - // - // pathComponent[[/pathComponent] ...] // e.g., "library/ubuntu" - remoteName = pathComponent + anyTimes(`/`+pathComponent) - namePat = optional(domainAndPort+`/`) + remoteName - - // anchoredNameRegexp is used to parse a name value, capturing the - // domain and trailing components. - anchoredNameRegexp = regexp.MustCompile(anchored(optional(capture(domainAndPort), `/`), capture(remoteName))) - - referencePat = anchored(capture(namePat), optional(`:`, capture(tag)), optional(`@`, capture(digestPat))) - - // anchoredIdentifierRegexp is used to check or match an - // identifier value, anchored at start and end of string. - anchoredIdentifierRegexp = regexp.MustCompile(anchored(identifier)) -) - -// optional wraps the expression in a non-capturing group and makes the -// production optional. -func optional(res ...string) string { - return `(?:` + strings.Join(res, "") + `)?` -} - -// anyTimes wraps the expression in a non-capturing group that can occur -// any number of times. 
-func anyTimes(res ...string) string { - return `(?:` + strings.Join(res, "") + `)*` -} - -// capture wraps the expression in a capturing group. -func capture(res ...string) string { - return `(` + strings.Join(res, "") + `)` -} - -// anchored anchors the regular expression by adding start and end delimiters. -func anchored(res ...string) string { - return `^` + strings.Join(res, "") + `$` -} diff --git a/vendor/github.com/distribution/distribution/v3/reference/sort.go b/vendor/github.com/distribution/distribution/v3/reference/sort.go deleted file mode 100644 index 416c37b076..0000000000 --- a/vendor/github.com/distribution/distribution/v3/reference/sort.go +++ /dev/null @@ -1,75 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package reference - -import ( - "sort" -) - -// Sort sorts string references preferring higher information references. -// -// The precedence is as follows: -// -// 1. [Named] + [Tagged] + [Digested] (e.g., "docker.io/library/busybox:latest@sha256:") -// 2. [Named] + [Tagged] (e.g., "docker.io/library/busybox:latest") -// 3. [Named] + [Digested] (e.g., "docker.io/library/busybo@sha256:") -// 4. [Named] (e.g., "docker.io/library/busybox") -// 5. [Digested] (e.g., "docker.io@sha256:") -// 6. Parse error -func Sort(references []string) []string { - var prefs []Reference - var bad []string - - for _, ref := range references { - pref, err := ParseAnyReference(ref) - if err != nil { - bad = append(bad, ref) - } else { - prefs = append(prefs, pref) - } - } - sort.Slice(prefs, func(a, b int) bool { - ar := refRank(prefs[a]) - br := refRank(prefs[b]) - if ar == br { - return prefs[a].String() < prefs[b].String() - } - return ar < br - }) - sort.Strings(bad) - var refs []string - for _, pref := range prefs { - refs = append(refs, pref.String()) - } - return append(refs, bad...) -} - -func refRank(ref Reference) uint8 { - if _, ok := ref.(Named); ok { - if _, ok = ref.(Tagged); ok { - if _, ok = ref.(Digested); ok { - return 1 - } - return 2 - } - if _, ok = ref.(Digested); ok { - return 3 - } - return 4 - } - return 5 -} diff --git a/vendor/github.com/distribution/distribution/v3/registry.go b/vendor/github.com/distribution/distribution/v3/registry.go index 658f2df082..d0deee65d7 100644 --- a/vendor/github.com/distribution/distribution/v3/registry.go +++ b/vendor/github.com/distribution/distribution/v3/registry.go @@ -3,7 +3,7 @@ package distribution import ( "context" - "github.com/distribution/distribution/v3/reference" + "github.com/distribution/reference" ) // Scope defines the set of items that match a namespace. 
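The sort.go removed above orders reference strings by how much information they carry (tagged+digested first, parse failures last). Assuming the standalone module keeps the same Sort helper as the deleted copy, a short usage sketch:

```go
package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	refs := reference.Sort([]string{
		"docker.io/library/busybox",
		"docker.io/library/busybox:latest",
		"docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa",
		"not a valid reference!",
	})
	for _, r := range refs {
		fmt.Println(r)
	}
	// Tagged entries sort first, then digested, then name-only,
	// with unparsable strings appended at the end.
}
```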
diff --git a/vendor/github.com/distribution/distribution/v3/registry/api/errcode/errors.go b/vendor/github.com/distribution/distribution/v3/registry/api/errcode/errors.go index 54856cc5c0..a8b52c1881 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/api/errcode/errors.go +++ b/vendor/github.com/distribution/distribution/v3/registry/api/errcode/errors.go @@ -224,11 +224,20 @@ func (errs Errors) MarshalJSON() ([]byte, error) { msg = err.Code.Message() } - tmpErrs.Errors = append(tmpErrs.Errors, Error{ + tmpErr := Error{ Code: err.Code, Message: msg, Detail: err.Detail, - }) + } + + // if the detail contains error extract the error message + // otherwise json.Marshal will not serialize it at all + // https://github.com/golang/go/issues/10748 + if detail, ok := tmpErr.Detail.(error); ok { + tmpErr.Detail = detail.Error() + } + + tmpErrs.Errors = append(tmpErrs.Errors, tmpErr) } return json.Marshal(tmpErrs) diff --git a/vendor/github.com/distribution/distribution/v3/registry/api/errcode/register.go b/vendor/github.com/distribution/distribution/v3/registry/api/errcode/register.go index 4674d49c9d..6030c36a51 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/api/errcode/register.go +++ b/vendor/github.com/distribution/distribution/v3/registry/api/errcode/register.go @@ -16,7 +16,7 @@ var ( var ( // ErrorCodeUnknown is a generic error that can be used as a last // resort if there is no situation-specific error message that can be used - ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ + ErrorCodeUnknown = register("errcode", ErrorDescriptor{ Value: "UNKNOWN", Message: "unknown error", Description: `Generic error returned when the error does not have an @@ -25,7 +25,7 @@ var ( }) // ErrorCodeUnsupported is returned when an operation is not supported. - ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{ + ErrorCodeUnsupported = register("errcode", ErrorDescriptor{ Value: "UNSUPPORTED", Message: "The operation is unsupported.", Description: `The operation was unsupported due to a missing @@ -35,7 +35,7 @@ var ( // ErrorCodeUnauthorized is returned if a request requires // authentication. - ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ + ErrorCodeUnauthorized = register("errcode", ErrorDescriptor{ Value: "UNAUTHORIZED", Message: "authentication required", Description: `The access controller was unable to authenticate @@ -47,7 +47,7 @@ var ( // ErrorCodeDenied is returned if a client does not have sufficient // permission to perform an action. - ErrorCodeDenied = Register("errcode", ErrorDescriptor{ + ErrorCodeDenied = register("errcode", ErrorDescriptor{ Value: "DENIED", Message: "requested access to the resource is denied", Description: `The access controller denied access for the @@ -57,7 +57,7 @@ var ( // ErrorCodeUnavailable provides a common error to report unavailability // of a service or endpoint. - ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ + ErrorCodeUnavailable = register("errcode", ErrorDescriptor{ Value: "UNAVAILABLE", Message: "service unavailable", Description: "Returned when a service is not available", @@ -66,7 +66,7 @@ var ( // ErrorCodeTooManyRequests is returned if a client attempts too many // times to contact a service endpoint. 
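The MarshalJSON change at the top of this errcode diff works around the fact that most error implementations have no exported fields, so encoding one directly produces an empty JSON object. A self-contained illustration of the problem and of the same flatten-to-string fix (the apiError struct here is only for the demo):

```go
package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

type apiError struct {
	Code    string `json:"code"`
	Message string `json:"message"`
	Detail  any    `json:"detail,omitempty"`
}

func main() {
	detail := errors.New("digest mismatch")

	// Marshaling the error value directly loses the message: error values
	// usually have no exported fields, so they encode as "{}".
	raw, _ := json.Marshal(apiError{Code: "DIGEST_INVALID", Message: "bad digest", Detail: detail})
	fmt.Println(string(raw)) // ...,"detail":{}}

	// The fix mirrored above: flatten error details to their string form first.
	e := apiError{Code: "DIGEST_INVALID", Message: "bad digest", Detail: detail}
	if err, ok := e.Detail.(error); ok {
		e.Detail = err.Error()
	}
	raw, _ = json.Marshal(e)
	fmt.Println(string(raw)) // ...,"detail":"digest mismatch"}
}
```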
- ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{ + ErrorCodeTooManyRequests = register("errcode", ErrorDescriptor{ Value: "TOOMANYREQUESTS", Message: "too many requests", Description: `Returned when a client attempts to contact a @@ -75,6 +75,157 @@ var ( }) ) +const errGroup = "registry.api.v2" + +var ( + // ErrorCodeDigestInvalid is returned when uploading a blob if the + // provided digest does not match the blob contents. + ErrorCodeDigestInvalid = register(errGroup, ErrorDescriptor{ + Value: "DIGEST_INVALID", + Message: "provided digest did not match uploaded content", + Description: `When a blob is uploaded, the registry will check that + the content matches the digest provided by the client. The error may + include a detail structure with the key "digest", including the + invalid digest string. This error may also be returned when a manifest + includes an invalid layer digest.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeSizeInvalid is returned when uploading a blob if the provided + ErrorCodeSizeInvalid = register(errGroup, ErrorDescriptor{ + Value: "SIZE_INVALID", + Message: "provided length did not match content length", + Description: `When a layer is uploaded, the provided size will be + checked against the uploaded content. If they do not match, this error + will be returned.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeRangeInvalid is returned when uploading a blob if the provided + // content range is invalid. + ErrorCodeRangeInvalid = register(errGroup, ErrorDescriptor{ + Value: "RANGE_INVALID", + Message: "invalid content range", + Description: `When a layer is uploaded, the provided range is checked + against the uploaded chunk. This error is returned if the range is + out of order.`, + HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable, + }) + + // ErrorCodeNameInvalid is returned when the name in the manifest does not + // match the provided name. + ErrorCodeNameInvalid = register(errGroup, ErrorDescriptor{ + Value: "NAME_INVALID", + Message: "invalid repository name", + Description: `Invalid repository name encountered either during + manifest validation or any API operation.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeTagInvalid is returned when the tag in the manifest does not + // match the provided tag. + ErrorCodeTagInvalid = register(errGroup, ErrorDescriptor{ + Value: "TAG_INVALID", + Message: "manifest tag did not match URI", + Description: `During a manifest upload, if the tag in the manifest + does not match the uri tag, this error will be returned.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeNameUnknown when the repository name is not known. + ErrorCodeNameUnknown = register(errGroup, ErrorDescriptor{ + Value: "NAME_UNKNOWN", + Message: "repository name not known to registry", + Description: `This is returned if the name used during an operation is + unknown to the registry.`, + HTTPStatusCode: http.StatusNotFound, + }) + + // ErrorCodeManifestUnknown returned when image manifest is unknown. + ErrorCodeManifestUnknown = register(errGroup, ErrorDescriptor{ + Value: "MANIFEST_UNKNOWN", + Message: "manifest unknown", + Description: `This error is returned when the manifest, identified by + name and tag is unknown to the repository.`, + HTTPStatusCode: http.StatusNotFound, + }) + + // ErrorCodeManifestInvalid returned when an image manifest is invalid, + // typically during a PUT operation. 
This error encompasses all errors + // encountered during manifest validation that aren't signature errors. + ErrorCodeManifestInvalid = register(errGroup, ErrorDescriptor{ + Value: "MANIFEST_INVALID", + Message: "manifest invalid", + Description: `During upload, manifests undergo several checks ensuring + validity. If those checks fail, this error may be returned, unless a + more specific error is included. The detail will contain information + the failed validation.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeManifestUnverified is returned when the manifest fails + // signature verification. + ErrorCodeManifestUnverified = register(errGroup, ErrorDescriptor{ + Value: "MANIFEST_UNVERIFIED", + Message: "manifest failed signature verification", + Description: `During manifest upload, if the manifest fails signature + verification, this error will be returned.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeManifestBlobUnknown is returned when a manifest blob is + // unknown to the registry. + ErrorCodeManifestBlobUnknown = register(errGroup, ErrorDescriptor{ + Value: "MANIFEST_BLOB_UNKNOWN", + Message: "blob unknown to registry", + Description: `This error may be returned when a manifest blob is + unknown to the registry.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeBlobUnknown is returned when a blob is unknown to the + // registry. This can happen when the manifest references a nonexistent + // layer or the result is not found by a blob fetch. + ErrorCodeBlobUnknown = register(errGroup, ErrorDescriptor{ + Value: "BLOB_UNKNOWN", + Message: "blob unknown to registry", + Description: `This error may be returned when a blob is unknown to the + registry in a specified repository. This can be returned with a + standard get or if a manifest references an unknown layer during + upload.`, + HTTPStatusCode: http.StatusNotFound, + }) + + // ErrorCodeBlobUploadUnknown is returned when an upload is unknown. + ErrorCodeBlobUploadUnknown = register(errGroup, ErrorDescriptor{ + Value: "BLOB_UPLOAD_UNKNOWN", + Message: "blob upload unknown to registry", + Description: `If a blob upload has been cancelled or was never + started, this error code may be returned.`, + HTTPStatusCode: http.StatusNotFound, + }) + + // ErrorCodeBlobUploadInvalid is returned when an upload is invalid. + ErrorCodeBlobUploadInvalid = register(errGroup, ErrorDescriptor{ + Value: "BLOB_UPLOAD_INVALID", + Message: "blob upload invalid", + Description: `The blob upload encountered an error and can no + longer proceed.`, + HTTPStatusCode: http.StatusNotFound, + }) + + // ErrorCodePaginationNumberInvalid is returned when the `n` parameter is + // not an integer, or `n` is negative. 
+ ErrorCodePaginationNumberInvalid = register(errGroup, ErrorDescriptor{ + Value: "PAGINATION_NUMBER_INVALID", + Message: "invalid number of results requested", + Description: `Returned when the "n" parameter (number of results + to return) is not an integer, "n" is negative or "n" is bigger than + the maximum allowed.`, + HTTPStatusCode: http.StatusBadRequest, + }) +) + var ( nextCode = 1000 registerLock sync.Mutex @@ -83,6 +234,12 @@ var ( // Register will make the passed-in error known to the environment and // return a new ErrorCode func Register(group string, descriptor ErrorDescriptor) ErrorCode { + return register(group, descriptor) +} + +// register will make the passed-in error known to the environment and +// return a new ErrorCode +func register(group string, descriptor ErrorDescriptor) ErrorCode { registerLock.Lock() defer registerLock.Unlock() diff --git a/vendor/github.com/distribution/distribution/v3/registry/api/v2/descriptors.go b/vendor/github.com/distribution/distribution/v3/registry/api/v2/descriptors.go index 8622d0d216..16b9cffeba 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/api/v2/descriptors.go +++ b/vendor/github.com/distribution/distribution/v3/registry/api/v2/descriptors.go @@ -4,11 +4,21 @@ import ( "net/http" "regexp" - "github.com/distribution/distribution/v3/reference" "github.com/distribution/distribution/v3/registry/api/errcode" + "github.com/distribution/reference" "github.com/opencontainers/go-digest" ) +var routeDescriptorsMap map[string]RouteDescriptor + +func init() { + routeDescriptorsMap = make(map[string]RouteDescriptor, len(routeDescriptors)) + + for _, descriptor := range routeDescriptors { + routeDescriptorsMap[descriptor.Name] = descriptor + } +} + var ( nameParameterDescriptor = ParameterDescriptor{ Name: "name", @@ -143,7 +153,7 @@ var ( Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ - ErrorCodePaginationNumberInvalid, + errcode.ErrorCodePaginationNumberInvalid, }, } @@ -164,7 +174,7 @@ var ( Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, + errcode.ErrorCodeNameUnknown, }, } @@ -213,21 +223,20 @@ var ( const ( manifestBody = `{ - "name": , - "tag": , - "fsLayers": [ - { - "blobSum": "" - }, - ... - ] - ], - "history": , - "signature": + "name": , + "tag": , + "fsLayers": [ + { + "blobSum": "" + }, + ... + ], + "history": , + "signature": }` errorsBody = `{ - "errors:" [ + "errors": [ { "code": , "message": "", @@ -259,7 +268,7 @@ type RouteDescriptor struct { // should match. Path string - // Entity should be a short, human-readalbe description of the object + // Entity should be a short, human-readable description of the object // targeted by the endpoint. 
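The errcode hunks above keep the exported Register as a thin wrapper over the new unexported register, so out-of-tree code can still define its own error codes. A hedged sketch of doing that against the vendored package as patched here (the group name and code are hypothetical):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/distribution/distribution/v3/registry/api/errcode"
)

// A hypothetical, application-specific error code registered in its own group.
var errorCodeQuotaExceeded = errcode.Register("myapp.api", errcode.ErrorDescriptor{
	Value:          "QUOTA_EXCEEDED",
	Message:        "storage quota exceeded",
	Description:    `Returned when a push would exceed the repository quota.`,
	HTTPStatusCode: http.StatusForbidden,
})

func main() {
	// WithDetail attaches structured context that ends up in the error body.
	err := errorCodeQuotaExceeded.WithDetail(map[string]any{"limitBytes": 1 << 30})
	fmt.Println(err)
}
```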
Entity string @@ -550,8 +559,8 @@ var routeDescriptors = []RouteDescriptor{ Description: "The name or reference was invalid.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeTagInvalid, + errcode.ErrorCodeNameInvalid, + errcode.ErrorCodeTagInvalid, }, Body: BodyDescriptor{ ContentType: "application/json", @@ -609,11 +618,11 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeTagInvalid, - ErrorCodeManifestInvalid, - ErrorCodeManifestUnverified, - ErrorCodeBlobUnknown, + errcode.ErrorCodeNameInvalid, + errcode.ErrorCodeTagInvalid, + errcode.ErrorCodeManifestInvalid, + errcode.ErrorCodeManifestUnverified, + errcode.ErrorCodeBlobUnknown, }, }, unauthorizedResponseDescriptor, @@ -625,12 +634,13 @@ var routeDescriptors = []RouteDescriptor{ Description: "One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUnknown, + errcode.ErrorCodeBlobUnknown, }, Body: BodyDescriptor{ ContentType: "application/json", Format: `{ - "errors:" [{ + "errors": [ + { "code": "BLOB_UNKNOWN", "message": "blob unknown to registry", "detail": { @@ -678,8 +688,8 @@ var routeDescriptors = []RouteDescriptor{ Description: "The specified `name` or `reference` were invalid and the delete was unable to proceed.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeTagInvalid, + errcode.ErrorCodeNameInvalid, + errcode.ErrorCodeTagInvalid, }, Body: BodyDescriptor{ ContentType: "application/json", @@ -695,8 +705,8 @@ var routeDescriptors = []RouteDescriptor{ Description: "The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. 
Clients can assume the manifest or tag was already deleted if this response is returned.", StatusCode: http.StatusNotFound, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeManifestUnknown, + errcode.ErrorCodeNameUnknown, + errcode.ErrorCodeManifestUnknown, }, Body: BodyDescriptor{ ContentType: "application/json", @@ -775,8 +785,8 @@ var routeDescriptors = []RouteDescriptor{ Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeDigestInvalid, + errcode.ErrorCodeNameInvalid, + errcode.ErrorCodeDigestInvalid, }, Body: BodyDescriptor{ ContentType: "application/json", @@ -791,8 +801,8 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeBlobUnknown, + errcode.ErrorCodeNameUnknown, + errcode.ErrorCodeBlobUnknown, }, }, unauthorizedResponseDescriptor, @@ -847,8 +857,8 @@ var routeDescriptors = []RouteDescriptor{ Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeDigestInvalid, + errcode.ErrorCodeNameInvalid, + errcode.ErrorCodeDigestInvalid, }, Body: BodyDescriptor{ ContentType: "application/json", @@ -858,8 +868,8 @@ var routeDescriptors = []RouteDescriptor{ { StatusCode: http.StatusNotFound, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeBlobUnknown, + errcode.ErrorCodeNameUnknown, + errcode.ErrorCodeBlobUnknown, }, Body: BodyDescriptor{ ContentType: "application/json", @@ -910,8 +920,8 @@ var routeDescriptors = []RouteDescriptor{ Name: "Invalid Name or Digest", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, + errcode.ErrorCodeDigestInvalid, + errcode.ErrorCodeNameInvalid, }, }, { @@ -922,8 +932,8 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeBlobUnknown, + errcode.ErrorCodeNameUnknown, + errcode.ErrorCodeBlobUnknown, }, }, { @@ -987,7 +997,7 @@ var routeDescriptors = []RouteDescriptor{ }, }, Body: BodyDescriptor{ - ContentType: "application/octect-stream", + ContentType: "application/octet-stream", Format: "", }, Successes: []ResponseDescriptor{ @@ -1010,8 +1020,8 @@ var routeDescriptors = []RouteDescriptor{ Name: "Invalid Name or Digest", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, + errcode.ErrorCodeDigestInvalid, + errcode.ErrorCodeNameInvalid, }, }, { @@ -1065,8 +1075,8 @@ var routeDescriptors = []RouteDescriptor{ Name: "Invalid Name or Digest", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, + errcode.ErrorCodeDigestInvalid, + errcode.ErrorCodeNameInvalid, }, }, unauthorizedResponseDescriptor, @@ -1122,8 +1132,8 @@ var routeDescriptors = []RouteDescriptor{ Name: "Invalid Name or Digest", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, + errcode.ErrorCodeDigestInvalid, + errcode.ErrorCodeNameInvalid, }, }, { @@ -1187,9 +1197,9 @@ var routeDescriptors = []RouteDescriptor{ Description: "There was an error processing the upload and it must be 
restarted.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, + errcode.ErrorCodeDigestInvalid, + errcode.ErrorCodeNameInvalid, + errcode.ErrorCodeBlobUploadInvalid, }, Body: BodyDescriptor{ ContentType: "application/json", @@ -1200,7 +1210,7 @@ var routeDescriptors = []RouteDescriptor{ Description: "The upload is unknown to the registry. The upload must be restarted.", StatusCode: http.StatusNotFound, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, + errcode.ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ ContentType: "application/json", @@ -1262,9 +1272,9 @@ var routeDescriptors = []RouteDescriptor{ Description: "There was an error processing the upload and it must be restarted.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, + errcode.ErrorCodeDigestInvalid, + errcode.ErrorCodeNameInvalid, + errcode.ErrorCodeBlobUploadInvalid, }, Body: BodyDescriptor{ ContentType: "application/json", @@ -1275,7 +1285,7 @@ var routeDescriptors = []RouteDescriptor{ Description: "The upload is unknown to the registry. The upload must be restarted.", StatusCode: http.StatusNotFound, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, + errcode.ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ ContentType: "application/json", @@ -1344,9 +1354,9 @@ var routeDescriptors = []RouteDescriptor{ Description: "There was an error processing the upload and it must be restarted.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, + errcode.ErrorCodeDigestInvalid, + errcode.ErrorCodeNameInvalid, + errcode.ErrorCodeBlobUploadInvalid, }, Body: BodyDescriptor{ ContentType: "application/json", @@ -1357,7 +1367,7 @@ var routeDescriptors = []RouteDescriptor{ Description: "The upload is unknown to the registry. The upload must be restarted.", StatusCode: http.StatusNotFound, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, + errcode.ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ ContentType: "application/json", @@ -1438,9 +1448,9 @@ var routeDescriptors = []RouteDescriptor{ Description: "There was an error processing the upload and it must be restarted.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, + errcode.ErrorCodeDigestInvalid, + errcode.ErrorCodeNameInvalid, + errcode.ErrorCodeBlobUploadInvalid, errcode.ErrorCodeUnsupported, }, Body: BodyDescriptor{ @@ -1452,7 +1462,7 @@ var routeDescriptors = []RouteDescriptor{ Description: "The upload is unknown to the registry. The upload must be restarted.", StatusCode: http.StatusNotFound, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, + errcode.ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ ContentType: "application/json", @@ -1497,8 +1507,8 @@ var routeDescriptors = []RouteDescriptor{ Description: "An error was encountered processing the delete. 
The client may ignore this error.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, + errcode.ErrorCodeNameInvalid, + errcode.ErrorCodeBlobUploadInvalid, }, Body: BodyDescriptor{ ContentType: "application/json", @@ -1509,7 +1519,7 @@ var routeDescriptors = []RouteDescriptor{ Description: "The upload is unknown to the registry. The client may ignore this error and assume the upload has been deleted.", StatusCode: http.StatusNotFound, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, + errcode.ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ ContentType: "application/json", @@ -1557,7 +1567,7 @@ var routeDescriptors = []RouteDescriptor{ "repositories": [ , ... - ] + ], }`, }, }, @@ -1576,7 +1586,7 @@ var routeDescriptors = []RouteDescriptor{ "repositories": [ , ... - ] + ], "next": "?last=&n=" }`, }, @@ -1600,13 +1610,3 @@ var routeDescriptors = []RouteDescriptor{ }, }, } - -var routeDescriptorsMap map[string]RouteDescriptor - -func init() { - routeDescriptorsMap = make(map[string]RouteDescriptor, len(routeDescriptors)) - - for _, descriptor := range routeDescriptors { - routeDescriptorsMap[descriptor.Name] = descriptor - } -} diff --git a/vendor/github.com/distribution/distribution/v3/registry/api/v2/errors.go b/vendor/github.com/distribution/distribution/v3/registry/api/v2/errors.go deleted file mode 100644 index dcbabfca01..0000000000 --- a/vendor/github.com/distribution/distribution/v3/registry/api/v2/errors.go +++ /dev/null @@ -1,158 +0,0 @@ -package v2 - -import ( - "net/http" - - "github.com/distribution/distribution/v3/registry/api/errcode" -) - -const errGroup = "registry.api.v2" - -var ( - // ErrorCodeDigestInvalid is returned when uploading a blob if the - // provided digest does not match the blob contents. - ErrorCodeDigestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "DIGEST_INVALID", - Message: "provided digest did not match uploaded content", - Description: `When a blob is uploaded, the registry will check that - the content matches the digest provided by the client. The error may - include a detail structure with the key "digest", including the - invalid digest string. This error may also be returned when a manifest - includes an invalid layer digest.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeSizeInvalid is returned when uploading a blob if the provided - ErrorCodeSizeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "SIZE_INVALID", - Message: "provided length did not match content length", - Description: `When a layer is uploaded, the provided size will be - checked against the uploaded content. If they do not match, this error - will be returned.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeRangeInvalid is returned when uploading a blob if the provided - // content range is invalid. - ErrorCodeRangeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "RANGE_INVALID", - Message: "invalid content range", - Description: `When a layer is uploaded, the provided range is checked - against the uploaded chunk. This error is returned if the range is - out of order.`, - HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable, - }) - - // ErrorCodeNameInvalid is returned when the name in the manifest does not - // match the provided name. 
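The corrected errorsBody and catalog samples in the descriptors above describe the JSON envelope a registry actually returns; decoding it client-side only needs a small struct. A sketch with an illustrative struct and sample payload (neither is part of the patched packages):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Minimal local mirror of the registry error envelope, following the
// corrected `"errors": [...]` form documented above.
type errorEnvelope struct {
	Errors []struct {
		Code    string          `json:"code"`
		Message string          `json:"message"`
		Detail  json.RawMessage `json:"detail,omitempty"`
	} `json:"errors"`
}

func main() {
	body := `{"errors":[{"code":"MANIFEST_UNKNOWN","message":"manifest unknown","detail":{"Tag":"latest"}}]}`

	var env errorEnvelope
	if err := json.Unmarshal([]byte(body), &env); err != nil {
		panic(err)
	}
	for _, e := range env.Errors {
		fmt.Printf("%s: %s\n", e.Code, e.Message)
	}
}
```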
- ErrorCodeNameInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NAME_INVALID", - Message: "invalid repository name", - Description: `Invalid repository name encountered either during - manifest validation or any API operation.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeTagInvalid is returned when the tag in the manifest does not - // match the provided tag. - ErrorCodeTagInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "TAG_INVALID", - Message: "manifest tag did not match URI", - Description: `During a manifest upload, if the tag in the manifest - does not match the uri tag, this error will be returned.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeNameUnknown when the repository name is not known. - ErrorCodeNameUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NAME_UNKNOWN", - Message: "repository name not known to registry", - Description: `This is returned if the name used during an operation is - unknown to the registry.`, - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeManifestUnknown returned when image manifest is unknown. - ErrorCodeManifestUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MANIFEST_UNKNOWN", - Message: "manifest unknown", - Description: `This error is returned when the manifest, identified by - name and tag is unknown to the repository.`, - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeManifestInvalid returned when an image manifest is invalid, - // typically during a PUT operation. This error encompasses all errors - // encountered during manifest validation that aren't signature errors. - ErrorCodeManifestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MANIFEST_INVALID", - Message: "manifest invalid", - Description: `During upload, manifests undergo several checks ensuring - validity. If those checks fail, this error may be returned, unless a - more specific error is included. The detail will contain information - the failed validation.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeManifestUnverified is returned when the manifest fails - // signature verification. - ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MANIFEST_UNVERIFIED", - Message: "manifest failed signature verification", - Description: `During manifest upload, if the manifest fails signature - verification, this error will be returned.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeManifestBlobUnknown is returned when a manifest blob is - // unknown to the registry. - ErrorCodeManifestBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MANIFEST_BLOB_UNKNOWN", - Message: "blob unknown to registry", - Description: `This error may be returned when a manifest blob is - unknown to the registry.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeBlobUnknown is returned when a blob is unknown to the - // registry. This can happen when the manifest references a nonexistent - // layer or the result is not found by a blob fetch. - ErrorCodeBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BLOB_UNKNOWN", - Message: "blob unknown to registry", - Description: `This error may be returned when a blob is unknown to the - registry in a specified repository. 
This can be returned with a - standard get or if a manifest references an unknown layer during - upload.`, - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeBlobUploadUnknown is returned when an upload is unknown. - ErrorCodeBlobUploadUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BLOB_UPLOAD_UNKNOWN", - Message: "blob upload unknown to registry", - Description: `If a blob upload has been cancelled or was never - started, this error code may be returned.`, - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeBlobUploadInvalid is returned when an upload is invalid. - ErrorCodeBlobUploadInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BLOB_UPLOAD_INVALID", - Message: "blob upload invalid", - Description: `The blob upload encountered an error and can no - longer proceed.`, - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodePaginationNumberInvalid is returned when the `n` parameter is - // not an integer, or `n` is negative. - ErrorCodePaginationNumberInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "PAGINATION_NUMBER_INVALID", - Message: "invalid number of results requested", - Description: `Returned when the "n" parameter (number of results - to return) is not an integer, "n" is negative or "n" is bigger than - the maximum allowed.`, - HTTPStatusCode: http.StatusBadRequest, - }) -) diff --git a/vendor/github.com/distribution/distribution/v3/registry/api/v2/errors_deprecated.go b/vendor/github.com/distribution/distribution/v3/registry/api/v2/errors_deprecated.go new file mode 100644 index 0000000000..b8ba2b2539 --- /dev/null +++ b/vendor/github.com/distribution/distribution/v3/registry/api/v2/errors_deprecated.go @@ -0,0 +1,86 @@ +package v2 + +import "github.com/distribution/distribution/v3/registry/api/errcode" + +var ( + // ErrorCodeDigestInvalid is returned when uploading a blob if the + // provided digest does not match the blob contents. + // + // Deprecated: use [errcode.ErrorCodeDigestInvalid]. + ErrorCodeDigestInvalid = errcode.ErrorCodeDigestInvalid + + // ErrorCodeSizeInvalid is returned when uploading a blob if the provided + // + // Deprecated: use [errcode.ErrorCodeSizeInvalid]. + ErrorCodeSizeInvalid = errcode.ErrorCodeSizeInvalid + + // ErrorCodeRangeInvalid is returned when uploading a blob if the provided + // content range is invalid. + // + // Deprecated: use [errcode.ErrorCodeRangeInvalid]. + ErrorCodeRangeInvalid = errcode.ErrorCodeRangeInvalid + + // ErrorCodeNameInvalid is returned when the name in the manifest does not + // match the provided name. + // + // Deprecated: use [errcode.ErrorCodeNameInvalid]. + ErrorCodeNameInvalid = errcode.ErrorCodeNameInvalid + + // ErrorCodeTagInvalid is returned when the tag in the manifest does not + // match the provided tag. + // + // Deprecated: use [errcode.ErrorCodeTagInvalid]. + ErrorCodeTagInvalid = errcode.ErrorCodeTagInvalid + + // ErrorCodeNameUnknown when the repository name is not known. + // + // Deprecated: use [errcode.ErrorCodeNameUnknown]. + ErrorCodeNameUnknown = errcode.ErrorCodeNameUnknown + + // ErrorCodeManifestUnknown returned when image manifest is unknown. + // + // Deprecated: use [errcode.ErrorCodeManifestUnknown]. + ErrorCodeManifestUnknown = errcode.ErrorCodeManifestUnknown + + // ErrorCodeManifestInvalid returned when an image manifest is invalid, + // typically during a PUT operation. This error encompasses all errors + // encountered during manifest validation that aren't signature errors. 
+ // + // Deprecated: use [errcode.ErrorCodeManifestInvalid]. + ErrorCodeManifestInvalid = errcode.ErrorCodeManifestInvalid + + // ErrorCodeManifestUnverified is returned when the manifest fails + // signature verification. + // + // Deprecated: use [errcode.ErrorCodeManifestUnverified]. + ErrorCodeManifestUnverified = errcode.ErrorCodeManifestUnverified + + // ErrorCodeManifestBlobUnknown is returned when a manifest blob is + // unknown to the registry. + // + // Deprecated: use [errcode.ErrorCodeManifestBlobUnknown]. + ErrorCodeManifestBlobUnknown = errcode.ErrorCodeManifestBlobUnknown + + // ErrorCodeBlobUnknown is returned when a blob is unknown to the + // registry. This can happen when the manifest references a nonexistent + // layer or the result is not found by a blob fetch. + // + // Deprecated: use [errcode.ErrorCodeBlobUnknown]. + ErrorCodeBlobUnknown = errcode.ErrorCodeBlobUnknown + + // ErrorCodeBlobUploadUnknown is returned when an upload is unknown. + // + // Deprecated: use [errcode.ErrorCodeBlobUploadUnknown]. + ErrorCodeBlobUploadUnknown = errcode.ErrorCodeBlobUploadUnknown + + // ErrorCodeBlobUploadInvalid is returned when an upload is invalid. + // + // Deprecated: use [errcode.ErrorCodeBlobUploadInvalid]. + ErrorCodeBlobUploadInvalid = errcode.ErrorCodeBlobUploadInvalid + + // ErrorCodePaginationNumberInvalid is returned when the `n` parameter is + // not an integer, or `n` is negative. + // + // Deprecated: use [errcode.ErrorCodePaginationNumberInvalid]. + ErrorCodePaginationNumberInvalid = errcode.ErrorCodePaginationNumberInvalid +) diff --git a/vendor/github.com/distribution/distribution/v3/registry/api/v2/urls.go b/vendor/github.com/distribution/distribution/v3/registry/api/v2/urls.go index f4aa90954b..80617fd3dc 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/api/v2/urls.go +++ b/vendor/github.com/distribution/distribution/v3/registry/api/v2/urls.go @@ -6,7 +6,7 @@ import ( "net/url" "strings" - "github.com/distribution/distribution/v3/reference" + "github.com/distribution/reference" "github.com/gorilla/mux" ) @@ -202,7 +202,7 @@ func (ub *URLBuilder) BuildBlobUploadChunkURL(name reference.Named, uuid string, return appendValuesURL(uploadURL, values...).String(), nil } -// clondedRoute returns a clone of the named route from the router. Routes +// cloneRoute returns a clone of the named route from the router. Routes // must be cloned to avoid modifying them during url generation. func (ub *URLBuilder) cloneRoute(name string) clonedRoute { route := new(mux.Route) diff --git a/vendor/github.com/distribution/distribution/v3/registry/storage/cache/cache.go b/vendor/github.com/distribution/distribution/v3/registry/storage/cache/cache.go index 0aa9dd4a80..8cf9b2b04d 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/storage/cache/cache.go +++ b/vendor/github.com/distribution/distribution/v3/registry/storage/cache/cache.go @@ -6,6 +6,7 @@ import ( "fmt" "github.com/distribution/distribution/v3" + v1 "github.com/opencontainers/image-spec/specs-go/v1" ) // BlobDescriptorCacheProvider provides repository scoped @@ -18,7 +19,7 @@ type BlobDescriptorCacheProvider interface { // ValidateDescriptor provides a helper function to ensure that caches have // common criteria for admitting descriptors. 
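The new errors_deprecated.go keeps the old v2.ErrorCode* identifiers as plain variables initialized from the errcode package, so existing imports and comparisons keep compiling. A tiny compatibility check, assuming the vendored packages as patched here:

```go
package main

import (
	"fmt"

	"github.com/distribution/distribution/v3/registry/api/errcode"
	v2 "github.com/distribution/distribution/v3/registry/api/v2"
)

func main() {
	// The deprecated v2 name is the same ErrorCode value as the errcode one,
	// so call sites comparing against it behave as before.
	fmt.Println(v2.ErrorCodeBlobUnknown == errcode.ErrorCodeBlobUnknown) // true
}
```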
-func ValidateDescriptor(desc distribution.Descriptor) error { +func ValidateDescriptor(desc v1.Descriptor) error { if err := desc.Digest.Validate(); err != nil { return err } diff --git a/vendor/github.com/distribution/distribution/v3/registry/storage/cache/cachedblobdescriptorstore.go b/vendor/github.com/distribution/distribution/v3/registry/storage/cache/cachedblobdescriptorstore.go index 06a56c99e8..d3f708449c 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/storage/cache/cachedblobdescriptorstore.go +++ b/vendor/github.com/distribution/distribution/v3/registry/storage/cache/cachedblobdescriptorstore.go @@ -4,9 +4,10 @@ import ( "context" "github.com/distribution/distribution/v3" - dcontext "github.com/distribution/distribution/v3/context" + "github.com/distribution/distribution/v3/internal/dcontext" prometheus "github.com/distribution/distribution/v3/metrics" "github.com/opencontainers/go-digest" + v1 "github.com/opencontainers/image-spec/specs-go/v1" ) type cachedBlobStatter struct { @@ -14,8 +15,14 @@ type cachedBlobStatter struct { backend distribution.BlobDescriptorService } -// cacheCount is the number of total cache request received/hits/misses -var cacheCount = prometheus.StorageNamespace.NewLabeledCounter("cache", "The number of cache request received", "type") +var ( + // cacheRequestCount is the number of total cache requests received. + cacheRequestCount = prometheus.StorageNamespace.NewCounter("cache_requests", "The number of cache request received") + // cacheRequestCount is the number of total cache requests received. + cacheHitCount = prometheus.StorageNamespace.NewCounter("cache_hits", "The number of cache request received") + // cacheErrorCount is the number of cache request errors. + cacheErrorCount = prometheus.StorageNamespace.NewCounter("cache_errors", "The number of cache request errors") +) // NewCachedBlobStatter creates a new statter which prefers a cache and // falls back to a backend. @@ -26,13 +33,13 @@ func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend dist } } -func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - cacheCount.WithValues("Request").Inc(1) +func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (v1.Descriptor, error) { + cacheRequestCount.Inc(1) // try getting from cache desc, cacheErr := cbds.cache.Stat(ctx, dgst) if cacheErr == nil { - cacheCount.WithValues("Hit").Inc(1) + cacheHitCount.Inc(1) return desc, nil } @@ -43,8 +50,6 @@ func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (di } if cacheErr == distribution.ErrBlobUnknown { - // cache doesn't have info. update it with info got from backend - cacheCount.WithValues("Miss").Inc(1) if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { dcontext.GetLoggerWithField(ctx, "blob", dgst).WithError(err).Error("error from cache setting desc") } @@ -52,7 +57,7 @@ func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (di } else { // unknown error from cache. just log and error. 
do not store cache as it may be trigger many set calls dcontext.GetLoggerWithField(ctx, "blob", dgst).WithError(cacheErr).Error("error from cache stat(ing) blob") - cacheCount.WithValues("Error").Inc(1) + cacheErrorCount.Inc(1) } return desc, nil @@ -71,7 +76,7 @@ func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) er return nil } -func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { +func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc v1.Descriptor) error { if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { dcontext.GetLoggerWithField(ctx, "blob", dgst).WithError(err).Error("error from cache setting desc") } diff --git a/vendor/github.com/distribution/distribution/v3/registry/storage/cache/memory/memory.go b/vendor/github.com/distribution/distribution/v3/registry/storage/cache/memory/memory.go index 6413468eee..8bf996ad7e 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/storage/cache/memory/memory.go +++ b/vendor/github.com/distribution/distribution/v3/registry/storage/cache/memory/memory.go @@ -5,10 +5,11 @@ import ( "math" "github.com/distribution/distribution/v3" - "github.com/distribution/distribution/v3/reference" "github.com/distribution/distribution/v3/registry/storage/cache" - lru "github.com/hashicorp/golang-lru" + "github.com/distribution/reference" + "github.com/hashicorp/golang-lru/arc/v2" "github.com/opencontainers/go-digest" + v1 "github.com/opencontainers/image-spec/specs-go/v1" ) const ( @@ -26,7 +27,7 @@ type descriptorCacheKey struct { } type inMemoryBlobDescriptorCacheProvider struct { - lru *lru.ARCCache + lru *arc.ARCCache[descriptorCacheKey, v1.Descriptor] } // NewInMemoryBlobDescriptorCacheProvider returns a new mapped-based cache for @@ -35,7 +36,7 @@ func NewInMemoryBlobDescriptorCacheProvider(size int) cache.BlobDescriptorCacheP if size <= 0 { size = math.MaxInt } - lruCache, err := lru.NewARC(size) + lruCache, err := arc.NewARC[descriptorCacheKey, v1.Descriptor](size) if err != nil { // NewARC can only fail if size is <= 0, so this unreachable panic(err) @@ -47,6 +48,12 @@ func NewInMemoryBlobDescriptorCacheProvider(size int) cache.BlobDescriptorCacheP func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { if _, err := reference.ParseNormalizedNamed(repo); err != nil { + if err == reference.ErrNameTooLong { + return nil, distribution.ErrRepositoryNameInvalid{ + Name: repo, + Reason: reference.ErrNameTooLong, + } + } return nil, err } @@ -56,9 +63,9 @@ func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) }, nil } -func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { +func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (v1.Descriptor, error) { if err := dgst.Validate(); err != nil { - return distribution.Descriptor{}, err + return v1.Descriptor{}, err } key := descriptorCacheKey{ @@ -66,13 +73,9 @@ func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgs } descriptor, ok := imbdcp.lru.Get(key) if ok { - // Type assertion not really necessary, but included in case - // it's necessary for the fuzzer - if desc, ok := descriptor.(distribution.Descriptor); ok { - return desc, nil - } + return descriptor, nil } - return distribution.Descriptor{}, 
distribution.ErrBlobUnknown + return v1.Descriptor{}, distribution.ErrBlobUnknown } func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error { @@ -83,7 +86,7 @@ func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dg return nil } -func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { +func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc v1.Descriptor) error { _, err := imbdcp.Stat(ctx, dgst) if err == distribution.ErrBlobUnknown { if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest { @@ -119,9 +122,9 @@ type repositoryScopedInMemoryBlobDescriptorCache struct { parent *inMemoryBlobDescriptorCacheProvider // allows lazy allocation of repo's map } -func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { +func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (v1.Descriptor, error) { if err := dgst.Validate(); err != nil { - return distribution.Descriptor{}, err + return v1.Descriptor{}, err } key := descriptorCacheKey{ @@ -130,13 +133,9 @@ func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Co } descriptor, ok := rsimbdcp.parent.lru.Get(key) if ok { - // Type assertion not really necessary, but included in case - // it's necessary for the fuzzer - if desc, ok := descriptor.(distribution.Descriptor); ok { - return desc, nil - } + return descriptor, nil } - return distribution.Descriptor{}, distribution.ErrBlobUnknown + return v1.Descriptor{}, distribution.ErrBlobUnknown } func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { @@ -148,7 +147,7 @@ func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.C return nil } -func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { +func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc v1.Descriptor) error { if err := dgst.Validate(); err != nil { return err } diff --git a/vendor/github.com/distribution/distribution/v3/tags.go b/vendor/github.com/distribution/distribution/v3/tags.go index 6033575ce3..ed94a51a0c 100644 --- a/vendor/github.com/distribution/distribution/v3/tags.go +++ b/vendor/github.com/distribution/distribution/v3/tags.go @@ -4,6 +4,7 @@ import ( "context" "github.com/opencontainers/go-digest" + v1 "github.com/opencontainers/image-spec/specs-go/v1" ) // TagService provides access to information about tagged objects. @@ -12,11 +13,11 @@ type TagService interface { // implementations may differentiate between "trusted" tags and // "untrusted" tags. If a tag is "untrusted", the mapping will be returned // as an ErrTagUntrusted error, with the target descriptor. - Get(ctx context.Context, tag string) (Descriptor, error) + Get(ctx context.Context, tag string) (v1.Descriptor, error) // Tag associates the tag with the provided descriptor, updating the // current association, if needed. 
- Tag(ctx context.Context, tag string, desc Descriptor) error + Tag(ctx context.Context, tag string, desc v1.Descriptor) error // Untag removes the given tag association Untag(ctx context.Context, tag string) error @@ -25,7 +26,7 @@ type TagService interface { All(ctx context.Context) ([]string, error) // Lookup returns the set of tags referencing the given digest. - Lookup(ctx context.Context, digest Descriptor) ([]string, error) + Lookup(ctx context.Context, digest v1.Descriptor) ([]string, error) } // TagManifestsProvider provides method to retrieve the digests of manifests that a tag historically diff --git a/vendor/github.com/distribution/distribution/v3/uuid/uuid.go b/vendor/github.com/distribution/distribution/v3/uuid/uuid.go deleted file mode 100644 index d433ccaf51..0000000000 --- a/vendor/github.com/distribution/distribution/v3/uuid/uuid.go +++ /dev/null @@ -1,126 +0,0 @@ -// Package uuid provides simple UUID generation. Only version 4 style UUIDs -// can be generated. -// -// Please see http://tools.ietf.org/html/rfc4122 for details on UUIDs. -package uuid - -import ( - "crypto/rand" - "fmt" - "io" - "os" - "syscall" - "time" -) - -const ( - // Bits is the number of bits in a UUID - Bits = 128 - - // Size is the number of bytes in a UUID - Size = Bits / 8 - - format = "%08x-%04x-%04x-%04x-%012x" -) - -var ( - // ErrUUIDInvalid indicates a parsed string is not a valid uuid. - ErrUUIDInvalid = fmt.Errorf("invalid uuid") - - // Loggerf can be used to override the default logging destination. Such - // log messages in this library should be logged at warning or higher. - Loggerf = func(format string, args ...interface{}) {} -) - -// UUID represents a UUID value. UUIDs can be compared and set to other values -// and accessed by byte. -type UUID [Size]byte - -// Generate creates a new, version 4 uuid. -func Generate() (u UUID) { - const ( - // ensures we backoff for less than 450ms total. Use the following to - // select new value, in units of 10ms: - // n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2 - maxretries = 9 - backoff = time.Millisecond * 10 - ) - - var ( - totalBackoff time.Duration - count int - retries int - ) - - for { - // This should never block but the read may fail. Because of this, - // we just try to read the random number generator until we get - // something. This is a very rare condition but may happen. - b := time.Duration(retries) * backoff - time.Sleep(b) - totalBackoff += b - - n, err := io.ReadFull(rand.Reader, u[count:]) - if err != nil { - if retryOnError(err) && retries < maxretries { - count += n - retries++ - Loggerf("error generating version 4 uuid, retrying: %v", err) - continue - } - - // Any other errors represent a system problem. What did someone - // do to /dev/urandom? - panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err)) - } - - break - } - - u[6] = (u[6] & 0x0f) | 0x40 // set version byte - u[8] = (u[8] & 0x3f) | 0x80 // set high order byte 0b10{8,9,a,b} - - return u -} - -// Parse attempts to extract a uuid from the string or returns an error. -func Parse(s string) (u UUID, err error) { - if len(s) != 36 { - return UUID{}, ErrUUIDInvalid - } - - // create stack addresses for each section of the uuid. 
- p := make([][]byte, 5) - - if _, err := fmt.Sscanf(s, format, &p[0], &p[1], &p[2], &p[3], &p[4]); err != nil { - return u, err - } - - copy(u[0:4], p[0]) - copy(u[4:6], p[1]) - copy(u[6:8], p[2]) - copy(u[8:10], p[3]) - copy(u[10:16], p[4]) - - return -} - -func (u UUID) String() string { - return fmt.Sprintf(format, u[:4], u[4:6], u[6:8], u[8:10], u[10:]) -} - -// retryOnError tries to detect whether or not retrying would be fruitful. -func retryOnError(err error) bool { - switch err := err.(type) { - case *os.PathError: - return retryOnError(err.Err) // unpack the target error - case syscall.Errno: - if err == syscall.EPERM { - // EPERM represents an entropy pool exhaustion, a condition under - // which we backoff and retry. - return true - } - } - - return false -} diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml index 0cffafa7bf..0ed62c1a18 100644 --- a/vendor/github.com/go-logr/logr/.golangci.yaml +++ b/vendor/github.com/go-logr/logr/.golangci.yaml @@ -1,26 +1,28 @@ +version: "2" + run: timeout: 1m tests: true linters: - disable-all: true - enable: + default: none + enable: # please keep this alphabetized + - asasalint - asciicheck + - copyloopvar + - dupl - errcheck - forcetypeassert + - goconst - gocritic - - gofmt - - goimports - - gosimple - govet - ineffassign - misspell + - musttag - revive - staticcheck - - typecheck - unused issues: - exclude-use-default: false max-issues-per-linter: 0 max-same-issues: 10 diff --git a/vendor/github.com/hashicorp/golang-lru/.gitignore b/vendor/github.com/hashicorp/golang-lru/.gitignore deleted file mode 100644 index 836562412f..0000000000 --- a/vendor/github.com/hashicorp/golang-lru/.gitignore +++ /dev/null @@ -1,23 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test diff --git a/vendor/github.com/hashicorp/golang-lru/.golangci.yml b/vendor/github.com/hashicorp/golang-lru/.golangci.yml deleted file mode 100644 index 49202fc41e..0000000000 --- a/vendor/github.com/hashicorp/golang-lru/.golangci.yml +++ /dev/null @@ -1,30 +0,0 @@ -linters: - enable: - - megacheck - - revive - - govet - - unconvert - - megacheck - - gas - - gocyclo - - dupl - - misspell - - unparam - - unused - - typecheck - - ineffassign - - stylecheck - - exportloopref - - gocritic - - nakedret - - gosimple - - prealloc - fast: false - disable-all: true - -issues: - exclude-rules: - - path: _test\.go - linters: - - dupl - exclude-use-default: false diff --git a/vendor/github.com/hashicorp/golang-lru/2q.go b/vendor/github.com/hashicorp/golang-lru/2q.go deleted file mode 100644 index 15fcad0306..0000000000 --- a/vendor/github.com/hashicorp/golang-lru/2q.go +++ /dev/null @@ -1,222 +0,0 @@ -package lru - -import ( - "fmt" - "sync" - - "github.com/hashicorp/golang-lru/simplelru" -) - -const ( - // Default2QRecentRatio is the ratio of the 2Q cache dedicated - // to recently added entries that have only been accessed once. - Default2QRecentRatio = 0.25 - - // Default2QGhostEntries is the default ratio of ghost - // entries kept to track entries recently evicted - Default2QGhostEntries = 0.50 -) - -// TwoQueueCache is a thread-safe fixed size 2Q cache. -// 2Q is an enhancement over the standard LRU cache -// in that it tracks both frequently and recently used -// entries separately. 
This avoids a burst in access to new -// entries from evicting frequently used entries. It adds some -// additional tracking overhead to the standard LRU cache, and is -// computationally about 2x the cost, and adds some metadata over -// head. The ARCCache is similar, but does not require setting any -// parameters. -type TwoQueueCache struct { - size int - recentSize int - - recent simplelru.LRUCache - frequent simplelru.LRUCache - recentEvict simplelru.LRUCache - lock sync.RWMutex -} - -// New2Q creates a new TwoQueueCache using the default -// values for the parameters. -func New2Q(size int) (*TwoQueueCache, error) { - return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries) -} - -// New2QParams creates a new TwoQueueCache using the provided -// parameter values. -func New2QParams(size int, recentRatio, ghostRatio float64) (*TwoQueueCache, error) { - if size <= 0 { - return nil, fmt.Errorf("invalid size") - } - if recentRatio < 0.0 || recentRatio > 1.0 { - return nil, fmt.Errorf("invalid recent ratio") - } - if ghostRatio < 0.0 || ghostRatio > 1.0 { - return nil, fmt.Errorf("invalid ghost ratio") - } - - // Determine the sub-sizes - recentSize := int(float64(size) * recentRatio) - evictSize := int(float64(size) * ghostRatio) - - // Allocate the LRUs - recent, err := simplelru.NewLRU(size, nil) - if err != nil { - return nil, err - } - frequent, err := simplelru.NewLRU(size, nil) - if err != nil { - return nil, err - } - recentEvict, err := simplelru.NewLRU(evictSize, nil) - if err != nil { - return nil, err - } - - // Initialize the cache - c := &TwoQueueCache{ - size: size, - recentSize: recentSize, - recent: recent, - frequent: frequent, - recentEvict: recentEvict, - } - return c, nil -} - -// Get looks up a key's value from the cache. -func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) { - c.lock.Lock() - defer c.lock.Unlock() - - // Check if this is a frequent value - if val, ok := c.frequent.Get(key); ok { - return val, ok - } - - // If the value is contained in recent, then we - // promote it to frequent - if val, ok := c.recent.Peek(key); ok { - c.recent.Remove(key) - c.frequent.Add(key, val) - return val, ok - } - - // No hit - return nil, false -} - -// Add adds a value to the cache. 
-func (c *TwoQueueCache) Add(key, value interface{}) { - c.lock.Lock() - defer c.lock.Unlock() - - // Check if the value is frequently used already, - // and just update the value - if c.frequent.Contains(key) { - c.frequent.Add(key, value) - return - } - - // Check if the value is recently used, and promote - // the value into the frequent list - if c.recent.Contains(key) { - c.recent.Remove(key) - c.frequent.Add(key, value) - return - } - - // If the value was recently evicted, add it to the - // frequently used list - if c.recentEvict.Contains(key) { - c.ensureSpace(true) - c.recentEvict.Remove(key) - c.frequent.Add(key, value) - return - } - - // Add to the recently seen list - c.ensureSpace(false) - c.recent.Add(key, value) -} - -// ensureSpace is used to ensure we have space in the cache -func (c *TwoQueueCache) ensureSpace(recentEvict bool) { - // If we have space, nothing to do - recentLen := c.recent.Len() - freqLen := c.frequent.Len() - if recentLen+freqLen < c.size { - return - } - - // If the recent buffer is larger than - // the target, evict from there - if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) { - k, _, _ := c.recent.RemoveOldest() - c.recentEvict.Add(k, nil) - return - } - - // Remove from the frequent list otherwise - c.frequent.RemoveOldest() -} - -// Len returns the number of items in the cache. -func (c *TwoQueueCache) Len() int { - c.lock.RLock() - defer c.lock.RUnlock() - return c.recent.Len() + c.frequent.Len() -} - -// Keys returns a slice of the keys in the cache. -// The frequently used keys are first in the returned slice. -func (c *TwoQueueCache) Keys() []interface{} { - c.lock.RLock() - defer c.lock.RUnlock() - k1 := c.frequent.Keys() - k2 := c.recent.Keys() - return append(k1, k2...) -} - -// Remove removes the provided key from the cache. -func (c *TwoQueueCache) Remove(key interface{}) { - c.lock.Lock() - defer c.lock.Unlock() - if c.frequent.Remove(key) { - return - } - if c.recent.Remove(key) { - return - } - if c.recentEvict.Remove(key) { - return - } -} - -// Purge is used to completely clear the cache. -func (c *TwoQueueCache) Purge() { - c.lock.Lock() - defer c.lock.Unlock() - c.recent.Purge() - c.frequent.Purge() - c.recentEvict.Purge() -} - -// Contains is used to check if the cache contains a key -// without updating recency or frequency. -func (c *TwoQueueCache) Contains(key interface{}) bool { - c.lock.RLock() - defer c.lock.RUnlock() - return c.frequent.Contains(key) || c.recent.Contains(key) -} - -// Peek is used to inspect the cache value of a key -// without updating recency or frequency. -func (c *TwoQueueCache) Peek(key interface{}) (value interface{}, ok bool) { - c.lock.RLock() - defer c.lock.RUnlock() - if val, ok := c.frequent.Peek(key); ok { - return val, ok - } - return c.recent.Peek(key) -} diff --git a/vendor/github.com/hashicorp/golang-lru/README.md b/vendor/github.com/hashicorp/golang-lru/README.md deleted file mode 100644 index 03bcfb5b76..0000000000 --- a/vendor/github.com/hashicorp/golang-lru/README.md +++ /dev/null @@ -1,7 +0,0 @@ -golang-lru -========== - -Please upgrade to github.com/hashicorp/golang-lru/v2 for all new code as v1 will -not be updated anymore. The v2 version supports generics and is faster; old code -can specify a specific tag, e.g. github.com/hashicorp/golang-lru/v1.0.2 for -backwards compatibility. 
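// Illustrative sketch, not part of the vendored patch: a minimal example of how
// the generic ARC cache from github.com/hashicorp/golang-lru/arc/v2 (vendored in
// the hunks below) replaces the old interface{}-based lru.ARCCache used by the
// in-memory blob descriptor cache earlier in this diff. Keys and values are now
// typed at compile time, so the Get path no longer needs a type assertion. The
// descriptorCacheKey field names here are assumptions for the example; the real
// struct lives in registry/storage/cache/memory/memory.go.
package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/arc/v2"
	digest "github.com/opencontainers/go-digest"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// descriptorCacheKey mirrors the cache key used in memory.go; the exact field
// set upstream may differ (hypothetical for this sketch).
type descriptorCacheKey struct {
	dgst digest.Digest
	repo string
}

func main() {
	// NewARC fails only for a non-positive size, which matches the
	// panic-on-error handling in NewInMemoryBlobDescriptorCacheProvider.
	cache, err := arc.NewARC[descriptorCacheKey, v1.Descriptor](128)
	if err != nil {
		panic(err)
	}

	dgst := digest.FromString("example layer")
	cache.Add(descriptorCacheKey{dgst: dgst}, v1.Descriptor{
		MediaType: v1.MediaTypeImageLayerGzip,
		Digest:    dgst,
		Size:      1234,
	})

	// Get returns a typed v1.Descriptor directly; no interface{} assertion.
	if desc, ok := cache.Get(descriptorCacheKey{dgst: dgst}); ok {
		fmt.Println(desc.Digest, desc.Size)
	}
}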
diff --git a/vendor/github.com/hashicorp/golang-lru/LICENSE b/vendor/github.com/hashicorp/golang-lru/arc/v2/LICENSE similarity index 100% rename from vendor/github.com/hashicorp/golang-lru/LICENSE rename to vendor/github.com/hashicorp/golang-lru/arc/v2/LICENSE diff --git a/vendor/github.com/hashicorp/golang-lru/arc.go b/vendor/github.com/hashicorp/golang-lru/arc/v2/arc.go similarity index 76% rename from vendor/github.com/hashicorp/golang-lru/arc.go rename to vendor/github.com/hashicorp/golang-lru/arc/v2/arc.go index e396f8428a..724f5fe0b8 100644 --- a/vendor/github.com/hashicorp/golang-lru/arc.go +++ b/vendor/github.com/hashicorp/golang-lru/arc/v2/arc.go @@ -1,9 +1,12 @@ -package lru +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package arc import ( "sync" - "github.com/hashicorp/golang-lru/simplelru" + "github.com/hashicorp/golang-lru/v2/simplelru" ) // ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC). @@ -14,41 +17,41 @@ import ( // it is roughly 2x the cost, and the extra memory overhead is linear // with the size of the cache. ARC has been patented by IBM, but is // similar to the TwoQueueCache (2Q) which requires setting parameters. -type ARCCache struct { +type ARCCache[K comparable, V any] struct { size int // Size is the total capacity of the cache p int // P is the dynamic preference towards T1 or T2 - t1 simplelru.LRUCache // T1 is the LRU for recently accessed items - b1 simplelru.LRUCache // B1 is the LRU for evictions from t1 + t1 simplelru.LRUCache[K, V] // T1 is the LRU for recently accessed items + b1 simplelru.LRUCache[K, struct{}] // B1 is the LRU for evictions from t1 - t2 simplelru.LRUCache // T2 is the LRU for frequently accessed items - b2 simplelru.LRUCache // B2 is the LRU for evictions from t2 + t2 simplelru.LRUCache[K, V] // T2 is the LRU for frequently accessed items + b2 simplelru.LRUCache[K, struct{}] // B2 is the LRU for evictions from t2 lock sync.RWMutex } // NewARC creates an ARC of the given size -func NewARC(size int) (*ARCCache, error) { +func NewARC[K comparable, V any](size int) (*ARCCache[K, V], error) { // Create the sub LRUs - b1, err := simplelru.NewLRU(size, nil) + b1, err := simplelru.NewLRU[K, struct{}](size, nil) if err != nil { return nil, err } - b2, err := simplelru.NewLRU(size, nil) + b2, err := simplelru.NewLRU[K, struct{}](size, nil) if err != nil { return nil, err } - t1, err := simplelru.NewLRU(size, nil) + t1, err := simplelru.NewLRU[K, V](size, nil) if err != nil { return nil, err } - t2, err := simplelru.NewLRU(size, nil) + t2, err := simplelru.NewLRU[K, V](size, nil) if err != nil { return nil, err } // Initialize the ARC - c := &ARCCache{ + c := &ARCCache[K, V]{ size: size, p: 0, t1: t1, @@ -60,7 +63,7 @@ func NewARC(size int) (*ARCCache, error) { } // Get looks up a key's value from the cache. -func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) { +func (c *ARCCache[K, V]) Get(key K) (value V, ok bool) { c.lock.Lock() defer c.lock.Unlock() @@ -78,11 +81,11 @@ func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) { } // No hit - return nil, false + return } // Add adds a value to the cache. 
-func (c *ARCCache) Add(key, value interface{}) { +func (c *ARCCache[K, V]) Add(key K, value V) { c.lock.Lock() defer c.lock.Unlock() @@ -177,30 +180,30 @@ func (c *ARCCache) Add(key, value interface{}) { // replace is used to adaptively evict from either T1 or T2 // based on the current learned value of P -func (c *ARCCache) replace(b2ContainsKey bool) { +func (c *ARCCache[K, V]) replace(b2ContainsKey bool) { t1Len := c.t1.Len() if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) { k, _, ok := c.t1.RemoveOldest() if ok { - c.b1.Add(k, nil) + c.b1.Add(k, struct{}{}) } } else { k, _, ok := c.t2.RemoveOldest() if ok { - c.b2.Add(k, nil) + c.b2.Add(k, struct{}{}) } } } // Len returns the number of cached entries -func (c *ARCCache) Len() int { +func (c *ARCCache[K, V]) Len() int { c.lock.RLock() defer c.lock.RUnlock() return c.t1.Len() + c.t2.Len() } // Keys returns all the cached keys -func (c *ARCCache) Keys() []interface{} { +func (c *ARCCache[K, V]) Keys() []K { c.lock.RLock() defer c.lock.RUnlock() k1 := c.t1.Keys() @@ -208,8 +211,17 @@ func (c *ARCCache) Keys() []interface{} { return append(k1, k2...) } +// Values returns all the cached values +func (c *ARCCache[K, V]) Values() []V { + c.lock.RLock() + defer c.lock.RUnlock() + v1 := c.t1.Values() + v2 := c.t2.Values() + return append(v1, v2...) +} + // Remove is used to purge a key from the cache -func (c *ARCCache) Remove(key interface{}) { +func (c *ARCCache[K, V]) Remove(key K) { c.lock.Lock() defer c.lock.Unlock() if c.t1.Remove(key) { @@ -227,7 +239,7 @@ func (c *ARCCache) Remove(key interface{}) { } // Purge is used to clear the cache -func (c *ARCCache) Purge() { +func (c *ARCCache[K, V]) Purge() { c.lock.Lock() defer c.lock.Unlock() c.t1.Purge() @@ -238,7 +250,7 @@ func (c *ARCCache) Purge() { // Contains is used to check if the cache contains a key // without updating recency or frequency. -func (c *ARCCache) Contains(key interface{}) bool { +func (c *ARCCache[K, V]) Contains(key K) bool { c.lock.RLock() defer c.lock.RUnlock() return c.t1.Contains(key) || c.t2.Contains(key) @@ -246,7 +258,7 @@ func (c *ARCCache) Contains(key interface{}) bool { // Peek is used to inspect the cache value of a key // without updating recency or frequency. -func (c *ARCCache) Peek(key interface{}) (value interface{}, ok bool) { +func (c *ARCCache[K, V]) Peek(key K) (value V, ok bool) { c.lock.RLock() defer c.lock.RUnlock() if val, ok := c.t1.Peek(key); ok { diff --git a/vendor/github.com/hashicorp/golang-lru/doc.go b/vendor/github.com/hashicorp/golang-lru/doc.go deleted file mode 100644 index 2547df979d..0000000000 --- a/vendor/github.com/hashicorp/golang-lru/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -// Package lru provides three different LRU caches of varying sophistication. -// -// Cache is a simple LRU cache. It is based on the -// LRU implementation in groupcache: -// https://github.com/golang/groupcache/tree/master/lru -// -// TwoQueueCache tracks frequently used and recently used entries separately. -// This avoids a burst of accesses from taking out frequently used entries, -// at the cost of about 2x computational overhead and some extra bookkeeping. -// -// ARCCache is an adaptive replacement cache. It tracks recent evictions as -// well as recent usage in both the frequent and recent caches. Its -// computational overhead is comparable to TwoQueueCache, but the memory -// overhead is linear with the size of the cache. -// -// ARC has been patented by IBM, so do not use it if that is problematic for -// your program. 
-// -// All caches in this package take locks while operating, and are therefore -// thread-safe for consumers. -package lru diff --git a/vendor/github.com/hashicorp/golang-lru/lru.go b/vendor/github.com/hashicorp/golang-lru/lru.go deleted file mode 100644 index 895d8e3ea0..0000000000 --- a/vendor/github.com/hashicorp/golang-lru/lru.go +++ /dev/null @@ -1,231 +0,0 @@ -package lru - -import ( - "sync" - - "github.com/hashicorp/golang-lru/simplelru" -) - -const ( - // DefaultEvictedBufferSize defines the default buffer size to store evicted key/val - DefaultEvictedBufferSize = 16 -) - -// Cache is a thread-safe fixed size LRU cache. -type Cache struct { - lru *simplelru.LRU - evictedKeys, evictedVals []interface{} - onEvictedCB func(k, v interface{}) - lock sync.RWMutex -} - -// New creates an LRU of the given size. -func New(size int) (*Cache, error) { - return NewWithEvict(size, nil) -} - -// NewWithEvict constructs a fixed size cache with the given eviction -// callback. -func NewWithEvict(size int, onEvicted func(key, value interface{})) (c *Cache, err error) { - // create a cache with default settings - c = &Cache{ - onEvictedCB: onEvicted, - } - if onEvicted != nil { - c.initEvictBuffers() - onEvicted = c.onEvicted - } - c.lru, err = simplelru.NewLRU(size, onEvicted) - return -} - -func (c *Cache) initEvictBuffers() { - c.evictedKeys = make([]interface{}, 0, DefaultEvictedBufferSize) - c.evictedVals = make([]interface{}, 0, DefaultEvictedBufferSize) -} - -// onEvicted save evicted key/val and sent in externally registered callback -// outside of critical section -func (c *Cache) onEvicted(k, v interface{}) { - c.evictedKeys = append(c.evictedKeys, k) - c.evictedVals = append(c.evictedVals, v) -} - -// Purge is used to completely clear the cache. -func (c *Cache) Purge() { - var ks, vs []interface{} - c.lock.Lock() - c.lru.Purge() - if c.onEvictedCB != nil && len(c.evictedKeys) > 0 { - ks, vs = c.evictedKeys, c.evictedVals - c.initEvictBuffers() - } - c.lock.Unlock() - // invoke callback outside of critical section - if c.onEvictedCB != nil { - for i := 0; i < len(ks); i++ { - c.onEvictedCB(ks[i], vs[i]) - } - } -} - -// Add adds a value to the cache. Returns true if an eviction occurred. -func (c *Cache) Add(key, value interface{}) (evicted bool) { - var k, v interface{} - c.lock.Lock() - evicted = c.lru.Add(key, value) - if c.onEvictedCB != nil && evicted { - k, v = c.evictedKeys[0], c.evictedVals[0] - c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0] - } - c.lock.Unlock() - if c.onEvictedCB != nil && evicted { - c.onEvictedCB(k, v) - } - return -} - -// Get looks up a key's value from the cache. -func (c *Cache) Get(key interface{}) (value interface{}, ok bool) { - c.lock.Lock() - value, ok = c.lru.Get(key) - c.lock.Unlock() - return value, ok -} - -// Contains checks if a key is in the cache, without updating the -// recent-ness or deleting it for being stale. -func (c *Cache) Contains(key interface{}) bool { - c.lock.RLock() - containKey := c.lru.Contains(key) - c.lock.RUnlock() - return containKey -} - -// Peek returns the key value (or undefined if not found) without updating -// the "recently used"-ness of the key. -func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) { - c.lock.RLock() - value, ok = c.lru.Peek(key) - c.lock.RUnlock() - return value, ok -} - -// ContainsOrAdd checks if a key is in the cache without updating the -// recent-ness or deleting it for being stale, and if not, adds the value. 
-// Returns whether found and whether an eviction occurred. -func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) { - var k, v interface{} - c.lock.Lock() - if c.lru.Contains(key) { - c.lock.Unlock() - return true, false - } - evicted = c.lru.Add(key, value) - if c.onEvictedCB != nil && evicted { - k, v = c.evictedKeys[0], c.evictedVals[0] - c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0] - } - c.lock.Unlock() - if c.onEvictedCB != nil && evicted { - c.onEvictedCB(k, v) - } - return false, evicted -} - -// PeekOrAdd checks if a key is in the cache without updating the -// recent-ness or deleting it for being stale, and if not, adds the value. -// Returns whether found and whether an eviction occurred. -func (c *Cache) PeekOrAdd(key, value interface{}) (previous interface{}, ok, evicted bool) { - var k, v interface{} - c.lock.Lock() - previous, ok = c.lru.Peek(key) - if ok { - c.lock.Unlock() - return previous, true, false - } - evicted = c.lru.Add(key, value) - if c.onEvictedCB != nil && evicted { - k, v = c.evictedKeys[0], c.evictedVals[0] - c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0] - } - c.lock.Unlock() - if c.onEvictedCB != nil && evicted { - c.onEvictedCB(k, v) - } - return nil, false, evicted -} - -// Remove removes the provided key from the cache. -func (c *Cache) Remove(key interface{}) (present bool) { - var k, v interface{} - c.lock.Lock() - present = c.lru.Remove(key) - if c.onEvictedCB != nil && present { - k, v = c.evictedKeys[0], c.evictedVals[0] - c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0] - } - c.lock.Unlock() - if c.onEvictedCB != nil && present { - c.onEvictedCB(k, v) - } - return -} - -// Resize changes the cache size. -func (c *Cache) Resize(size int) (evicted int) { - var ks, vs []interface{} - c.lock.Lock() - evicted = c.lru.Resize(size) - if c.onEvictedCB != nil && evicted > 0 { - ks, vs = c.evictedKeys, c.evictedVals - c.initEvictBuffers() - } - c.lock.Unlock() - if c.onEvictedCB != nil && evicted > 0 { - for i := 0; i < len(ks); i++ { - c.onEvictedCB(ks[i], vs[i]) - } - } - return evicted -} - -// RemoveOldest removes the oldest item from the cache. -func (c *Cache) RemoveOldest() (key, value interface{}, ok bool) { - var k, v interface{} - c.lock.Lock() - key, value, ok = c.lru.RemoveOldest() - if c.onEvictedCB != nil && ok { - k, v = c.evictedKeys[0], c.evictedVals[0] - c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0] - } - c.lock.Unlock() - if c.onEvictedCB != nil && ok { - c.onEvictedCB(k, v) - } - return -} - -// GetOldest returns the oldest entry -func (c *Cache) GetOldest() (key, value interface{}, ok bool) { - c.lock.RLock() - key, value, ok = c.lru.GetOldest() - c.lock.RUnlock() - return -} - -// Keys returns a slice of the keys in the cache, from oldest to newest. -func (c *Cache) Keys() []interface{} { - c.lock.RLock() - keys := c.lru.Keys() - c.lock.RUnlock() - return keys -} - -// Len returns the number of items in the cache. 
-func (c *Cache) Len() int { - c.lock.RLock() - length := c.lru.Len() - c.lock.RUnlock() - return length -} diff --git a/vendor/github.com/hashicorp/golang-lru/testing.go b/vendor/github.com/hashicorp/golang-lru/testing.go deleted file mode 100644 index 492760782c..0000000000 --- a/vendor/github.com/hashicorp/golang-lru/testing.go +++ /dev/null @@ -1,16 +0,0 @@ -package lru - -import ( - "crypto/rand" - "math" - "math/big" - "testing" -) - -func getRand(tb testing.TB) int64 { - out, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64)) - if err != nil { - tb.Fatal(err) - } - return out.Int64() -} diff --git a/vendor/github.com/hashicorp/golang-lru/v2/LICENSE b/vendor/github.com/hashicorp/golang-lru/v2/LICENSE new file mode 100644 index 0000000000..0e5d580e0e --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/LICENSE @@ -0,0 +1,364 @@ +Copyright (c) 2014 HashiCorp, Inc. + +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. 
For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. 
Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/golang-lru/v2/internal/list.go b/vendor/github.com/hashicorp/golang-lru/v2/internal/list.go new file mode 100644 index 0000000000..5cd74a0343 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/internal/list.go @@ -0,0 +1,142 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE_list file. + +package internal + +import "time" + +// Entry is an LRU Entry +type Entry[K comparable, V any] struct { + // Next and previous pointers in the doubly-linked list of elements. + // To simplify the implementation, internally a list l is implemented + // as a ring, such that &l.root is both the next element of the last + // list element (l.Back()) and the previous element of the first list + // element (l.Front()). + next, prev *Entry[K, V] + + // The list to which this element belongs. + list *LruList[K, V] + + // The LRU Key of this element. + Key K + + // The Value stored with this element. 
+ Value V + + // The time this element would be cleaned up, optional + ExpiresAt time.Time + + // The expiry bucket item was put in, optional + ExpireBucket uint8 +} + +// PrevEntry returns the previous list element or nil. +func (e *Entry[K, V]) PrevEntry() *Entry[K, V] { + if p := e.prev; e.list != nil && p != &e.list.root { + return p + } + return nil +} + +// LruList represents a doubly linked list. +// The zero Value for LruList is an empty list ready to use. +type LruList[K comparable, V any] struct { + root Entry[K, V] // sentinel list element, only &root, root.prev, and root.next are used + len int // current list Length excluding (this) sentinel element +} + +// Init initializes or clears list l. +func (l *LruList[K, V]) Init() *LruList[K, V] { + l.root.next = &l.root + l.root.prev = &l.root + l.len = 0 + return l +} + +// NewList returns an initialized list. +func NewList[K comparable, V any]() *LruList[K, V] { return new(LruList[K, V]).Init() } + +// Length returns the number of elements of list l. +// The complexity is O(1). +func (l *LruList[K, V]) Length() int { return l.len } + +// Back returns the last element of list l or nil if the list is empty. +func (l *LruList[K, V]) Back() *Entry[K, V] { + if l.len == 0 { + return nil + } + return l.root.prev +} + +// lazyInit lazily initializes a zero List Value. +func (l *LruList[K, V]) lazyInit() { + if l.root.next == nil { + l.Init() + } +} + +// insert inserts e after at, increments l.len, and returns e. +func (l *LruList[K, V]) insert(e, at *Entry[K, V]) *Entry[K, V] { + e.prev = at + e.next = at.next + e.prev.next = e + e.next.prev = e + e.list = l + l.len++ + return e +} + +// insertValue is a convenience wrapper for insert(&Entry{Value: v, ExpiresAt: ExpiresAt}, at). +func (l *LruList[K, V]) insertValue(k K, v V, expiresAt time.Time, at *Entry[K, V]) *Entry[K, V] { + return l.insert(&Entry[K, V]{Value: v, Key: k, ExpiresAt: expiresAt}, at) +} + +// Remove removes e from its list, decrements l.len +func (l *LruList[K, V]) Remove(e *Entry[K, V]) V { + e.prev.next = e.next + e.next.prev = e.prev + e.next = nil // avoid memory leaks + e.prev = nil // avoid memory leaks + e.list = nil + l.len-- + + return e.Value +} + +// move moves e to next to at. +func (l *LruList[K, V]) move(e, at *Entry[K, V]) { + if e == at { + return + } + e.prev.next = e.next + e.next.prev = e.prev + + e.prev = at + e.next = at.next + e.prev.next = e + e.next.prev = e +} + +// PushFront inserts a new element e with value v at the front of list l and returns e. +func (l *LruList[K, V]) PushFront(k K, v V) *Entry[K, V] { + l.lazyInit() + return l.insertValue(k, v, time.Time{}, &l.root) +} + +// PushFrontExpirable inserts a new expirable element e with Value v at the front of list l and returns e. +func (l *LruList[K, V]) PushFrontExpirable(k K, v V, expiresAt time.Time) *Entry[K, V] { + l.lazyInit() + return l.insertValue(k, v, expiresAt, &l.root) +} + +// MoveToFront moves element e to the front of list l. +// If e is not an element of l, the list is not modified. +// The element must not be nil. 
+func (l *LruList[K, V]) MoveToFront(e *Entry[K, V]) { + if e.list != l || l.root.next == e { + return + } + // see comment in List.Remove about initialization of l + l.move(e, &l.root) +} diff --git a/vendor/github.com/hashicorp/golang-lru/v2/simplelru/LICENSE_list b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/LICENSE_list new file mode 100644 index 0000000000..c4764e6b2f --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/LICENSE_list @@ -0,0 +1,29 @@ +This license applies to simplelru/list.go + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go similarity index 50% rename from vendor/github.com/hashicorp/golang-lru/simplelru/lru.go rename to vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go index 9233583c91..f69792388c 100644 --- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go +++ b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go @@ -1,46 +1,45 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package simplelru import ( - "container/list" "errors" + + "github.com/hashicorp/golang-lru/v2/internal" ) // EvictCallback is used to get a callback when a cache entry is evicted -type EvictCallback func(key interface{}, value interface{}) +type EvictCallback[K comparable, V any] func(key K, value V) // LRU implements a non-thread safe fixed size LRU cache -type LRU struct { +type LRU[K comparable, V any] struct { size int - evictList *list.List - items map[interface{}]*list.Element - onEvict EvictCallback -} - -// entry is used to hold a value in the evictList -type entry struct { - key interface{} - value interface{} + evictList *internal.LruList[K, V] + items map[K]*internal.Entry[K, V] + onEvict EvictCallback[K, V] } // NewLRU constructs an LRU of the given size -func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { +func NewLRU[K comparable, V any](size int, onEvict EvictCallback[K, V]) (*LRU[K, V], error) { if size <= 0 { return nil, errors.New("must provide a positive size") } - c := &LRU{ + + c := &LRU[K, V]{ size: size, - evictList: list.New(), - items: make(map[interface{}]*list.Element), + evictList: internal.NewList[K, V](), + items: make(map[K]*internal.Entry[K, V]), onEvict: onEvict, } return c, nil } // Purge is used to completely clear the cache. -func (c *LRU) Purge() { +func (c *LRU[K, V]) Purge() { for k, v := range c.items { if c.onEvict != nil { - c.onEvict(k, v.Value.(*entry).value) + c.onEvict(k, v.Value) } delete(c.items, k) } @@ -48,20 +47,19 @@ func (c *LRU) Purge() { } // Add adds a value to the cache. Returns true if an eviction occurred. -func (c *LRU) Add(key, value interface{}) (evicted bool) { +func (c *LRU[K, V]) Add(key K, value V) (evicted bool) { // Check for existing item if ent, ok := c.items[key]; ok { c.evictList.MoveToFront(ent) - ent.Value.(*entry).value = value + ent.Value = value return false } // Add new item - ent := &entry{key, value} - entry := c.evictList.PushFront(ent) - c.items[key] = entry + ent := c.evictList.PushFront(key, value) + c.items[key] = ent - evict := c.evictList.Len() > c.size + evict := c.evictList.Length() > c.size // Verify size not exceeded if evict { c.removeOldest() @@ -70,37 +68,34 @@ func (c *LRU) Add(key, value interface{}) (evicted bool) { } // Get looks up a key's value from the cache. -func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { +func (c *LRU[K, V]) Get(key K) (value V, ok bool) { if ent, ok := c.items[key]; ok { c.evictList.MoveToFront(ent) - if ent.Value.(*entry) == nil { - return nil, false - } - return ent.Value.(*entry).value, true + return ent.Value, true } return } // Contains checks if a key is in the cache, without updating the recent-ness // or deleting it for being stale. -func (c *LRU) Contains(key interface{}) (ok bool) { +func (c *LRU[K, V]) Contains(key K) (ok bool) { _, ok = c.items[key] return ok } // Peek returns the key value (or undefined if not found) without updating // the "recently used"-ness of the key. -func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { - var ent *list.Element +func (c *LRU[K, V]) Peek(key K) (value V, ok bool) { + var ent *internal.Entry[K, V] if ent, ok = c.items[key]; ok { - return ent.Value.(*entry).value, true + return ent.Value, true } - return nil, ok + return } // Remove removes the provided key from the cache, returning if the // key was contained. 
-func (c *LRU) Remove(key interface{}) (present bool) { +func (c *LRU[K, V]) Remove(key K) (present bool) { if ent, ok := c.items[key]; ok { c.removeElement(ent) return true @@ -109,44 +104,51 @@ func (c *LRU) Remove(key interface{}) (present bool) { } // RemoveOldest removes the oldest item from the cache. -func (c *LRU) RemoveOldest() (key, value interface{}, ok bool) { - ent := c.evictList.Back() - if ent != nil { +func (c *LRU[K, V]) RemoveOldest() (key K, value V, ok bool) { + if ent := c.evictList.Back(); ent != nil { c.removeElement(ent) - kv := ent.Value.(*entry) - return kv.key, kv.value, true + return ent.Key, ent.Value, true } - return nil, nil, false + return } // GetOldest returns the oldest entry -func (c *LRU) GetOldest() (key, value interface{}, ok bool) { - ent := c.evictList.Back() - if ent != nil { - kv := ent.Value.(*entry) - return kv.key, kv.value, true +func (c *LRU[K, V]) GetOldest() (key K, value V, ok bool) { + if ent := c.evictList.Back(); ent != nil { + return ent.Key, ent.Value, true } - return nil, nil, false + return } // Keys returns a slice of the keys in the cache, from oldest to newest. -func (c *LRU) Keys() []interface{} { - keys := make([]interface{}, len(c.items)) +func (c *LRU[K, V]) Keys() []K { + keys := make([]K, c.evictList.Length()) i := 0 - for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() { - keys[i] = ent.Value.(*entry).key + for ent := c.evictList.Back(); ent != nil; ent = ent.PrevEntry() { + keys[i] = ent.Key i++ } return keys } +// Values returns a slice of the values in the cache, from oldest to newest. +func (c *LRU[K, V]) Values() []V { + values := make([]V, len(c.items)) + i := 0 + for ent := c.evictList.Back(); ent != nil; ent = ent.PrevEntry() { + values[i] = ent.Value + i++ + } + return values +} + // Len returns the number of items in the cache. -func (c *LRU) Len() int { - return c.evictList.Len() +func (c *LRU[K, V]) Len() int { + return c.evictList.Length() } // Resize changes the cache size. -func (c *LRU) Resize(size int) (evicted int) { +func (c *LRU[K, V]) Resize(size int) (evicted int) { diff := c.Len() - size if diff < 0 { diff = 0 @@ -159,19 +161,17 @@ func (c *LRU) Resize(size int) (evicted int) { } // removeOldest removes the oldest item from the cache. -func (c *LRU) removeOldest() { - ent := c.evictList.Back() - if ent != nil { +func (c *LRU[K, V]) removeOldest() { + if ent := c.evictList.Back(); ent != nil { c.removeElement(ent) } } // removeElement is used to remove a given list element from the cache -func (c *LRU) removeElement(e *list.Element) { +func (c *LRU[K, V]) removeElement(e *internal.Entry[K, V]) { c.evictList.Remove(e) - kv := e.Value.(*entry) - delete(c.items, kv.key) + delete(c.items, e.Key) if c.onEvict != nil { - c.onEvict(kv.key, kv.value) + c.onEvict(e.Key, e.Value) } } diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go similarity index 68% rename from vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go rename to vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go index cb7f8caf03..043b8bcc3f 100644 --- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go +++ b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go @@ -1,33 +1,39 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // Package simplelru provides simple LRU implementation based on build-in container/list. 
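Editorial note, not part of the vendored diff: the golang-lru v2 rewrite above replaces the interface{}-based simplelru API with type parameters, as the new NewLRU, Add, Get, Keys, and Values signatures show. A minimal usage sketch of the v2 API; the key/value types, sizes, and entries below are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/v2/simplelru"
)

func main() {
	// The eviction callback now receives typed key/value pairs
	// instead of interface{} arguments.
	onEvict := func(key string, value int) {
		fmt.Printf("evicted %s=%d\n", key, value)
	}

	// simplelru.LRU is not thread-safe; callers must synchronize access.
	cache, err := simplelru.NewLRU[string, int](2, onEvict)
	if err != nil {
		panic(err)
	}

	cache.Add("a", 1)
	cache.Add("b", 2)
	cache.Add("c", 3) // evicts "a", the least recently used entry

	if v, ok := cache.Get("b"); ok { // no type assertion needed
		fmt.Println("b =", v)
	}
	fmt.Println(cache.Keys(), cache.Values()) // [c b] [3 2], oldest to newest
}
```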
package simplelru // LRUCache is the interface for simple LRU cache. -type LRUCache interface { +type LRUCache[K comparable, V any] interface { // Adds a value to the cache, returns true if an eviction occurred and // updates the "recently used"-ness of the key. - Add(key, value interface{}) bool + Add(key K, value V) bool // Returns key's value from the cache and // updates the "recently used"-ness of the key. #value, isFound - Get(key interface{}) (value interface{}, ok bool) + Get(key K) (value V, ok bool) // Checks if a key exists in cache without updating the recent-ness. - Contains(key interface{}) (ok bool) + Contains(key K) (ok bool) // Returns key's value without updating the "recently used"-ness of the key. - Peek(key interface{}) (value interface{}, ok bool) + Peek(key K) (value V, ok bool) // Removes a key from the cache. - Remove(key interface{}) bool + Remove(key K) bool // Removes the oldest entry from cache. - RemoveOldest() (interface{}, interface{}, bool) + RemoveOldest() (K, V, bool) // Returns the oldest entry from the cache. #key, value, isFound - GetOldest() (interface{}, interface{}, bool) + GetOldest() (K, V, bool) // Returns a slice of the keys in the cache, from oldest to newest. - Keys() []interface{} + Keys() []K + + // Values returns a slice of the values in the cache, from oldest to newest. + Values() []V // Returns the number of items in the cache. Len() int diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go index 7069ae44d7..c3897c7ca0 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/version.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go @@ -22,7 +22,7 @@ const ( // VersionMinor is for functionality in a backwards-compatible manner VersionMinor = 1 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 0 + VersionPatch = 1 // VersionDev indicates development branch. Releases will be empty string. VersionDev = "" diff --git a/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/LICENSE b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/auth/api_version.go b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/auth/api_version.go new file mode 100644 index 0000000000..c7d5a55d74 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/auth/api_version.go @@ -0,0 +1,20 @@ +package auth + +import ( + "net/http" + "strings" +) + +// APIVersions gets the API versions out of an HTTP response using the provided +// version header as the key for the HTTP header. 
+func APIVersions(resp *http.Response, versionHeader string) []string { + var versions []string + if versionHeader != "" { + for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey(versionHeader)] { + for _, version := range strings.Fields(supportedVersions) { + versions = append(versions, version) + } + } + } + return versions +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/auth/credential_store.go b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/auth/credential_store.go new file mode 100644 index 0000000000..f129e0ef31 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/auth/credential_store.go @@ -0,0 +1,17 @@ +package auth + +import "net/url" + +// CredentialStore is an interface for getting credentials for a given URL. +type CredentialStore interface { + // Basic returns basic auth for the given URL + Basic(*url.URL) (string, string) + + // RefreshToken returns a refresh token for the + // given URL and service + RefreshToken(*url.URL, string) string + + // SetRefreshToken sets the refresh token if none + // is provided for the given url and service + SetRefreshToken(realm *url.URL, service, token string) +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/auth/session.go b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/auth/session.go new file mode 100644 index 0000000000..a4a60608d2 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/auth/session.go @@ -0,0 +1,32 @@ +package auth + +import ( + "fmt" + "strings" +) + +// Scope is a type which is serializable to a string +// using the allow scope grammar. +type Scope interface { + String() string +} + +// RepositoryScope represents a token scope for access +// to a repository. +type RepositoryScope struct { + Repository string + Class string + Actions []string +} + +// String returns the string representation of the repository +// using the scope grammar +func (rs RepositoryScope) String() string { + repoType := "repository" + // Keep existing format for image class to maintain backwards compatibility + // with authorization servers which do not support the expanded grammar. 
+ if rs.Class != "" && rs.Class != "image" { + repoType = fmt.Sprintf("%s(%s)", repoType, rs.Class) + } + return fmt.Sprintf("%s:%s:%s", repoType, rs.Repository, strings.Join(rs.Actions, ",")) +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/registryclient/client.go b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/client.go similarity index 93% rename from vendor/github.com/openshift/library-go/pkg/image/registryclient/client.go rename to vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/client.go index c7f961c724..fb4ca030c4 100644 --- a/vendor/github.com/openshift/library-go/pkg/image/registryclient/client.go +++ b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/client.go @@ -13,27 +13,26 @@ import ( "sync" "time" - "golang.org/x/time/rate" - - "k8s.io/klog/v2" - "github.com/distribution/distribution/v3" - "github.com/distribution/distribution/v3/manifest/schema1" - "github.com/distribution/distribution/v3/reference" "github.com/distribution/distribution/v3/registry/api/errcode" - registryclient "github.com/distribution/distribution/v3/registry/client" - "github.com/distribution/distribution/v3/registry/client/auth" - "github.com/distribution/distribution/v3/registry/client/auth/challenge" - "github.com/distribution/distribution/v3/registry/client/transport" + "github.com/distribution/reference" "github.com/opencontainers/go-digest" + "golang.org/x/time/rate" + "k8s.io/klog/v2" imagereference "github.com/openshift/library-go/pkg/image/reference" + regauth "github.com/openshift/library-go/pkg/image/registryclient/v2/auth" + "github.com/openshift/library-go/pkg/image/registryclient/v2/clienterrors" + registryclient "github.com/openshift/library-go/pkg/image/registryclient/v2/internal/client" + "github.com/openshift/library-go/pkg/image/registryclient/v2/internal/client/auth" + "github.com/openshift/library-go/pkg/image/registryclient/v2/internal/client/auth/challenge" + "github.com/openshift/library-go/pkg/image/registryclient/v2/transport" ) // CredentialStoreFactory is any entity capable of creating a CredentialStore based on an image // path (such as quay.io/fedora/fedora). type CredentialStoreFactory interface { - CredentialStoreFor(image string) auth.CredentialStore + CredentialStoreFor(image string) regauth.CredentialStore } // RepositoryRetriever fetches a Docker distribution.Repository. 
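Editorial note, not part of the vendored diff: the new registryclient/v2/auth package above exposes the token scope grammar directly through RepositoryScope.String, shown in the session.go hunk. A short illustration of the strings it produces, using made-up repository names:

```go
package main

import (
	"fmt"

	regauth "github.com/openshift/library-go/pkg/image/registryclient/v2/auth"
)

func main() {
	// An empty class (or "image") keeps the legacy grammar for
	// compatibility with older authorization servers.
	pull := regauth.RepositoryScope{
		Repository: "myorg/myimage", // hypothetical repository
		Actions:    []string{"pull"},
	}
	fmt.Println(pull.String()) // repository:myorg/myimage:pull

	// Any other class switches to the expanded "repository(class)" form.
	plugin := regauth.RepositoryScope{
		Repository: "myorg/myplugin",
		Class:      "plugin",
		Actions:    []string{"pull", "push"},
	}
	fmt.Println(plugin.String()) // repository(plugin):myorg/myplugin:pull,push
}
```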
@@ -82,10 +81,10 @@ type Context struct { Transport http.RoundTripper InsecureTransport http.RoundTripper Challenges challenge.Manager - Scopes []auth.Scope + Scopes []regauth.Scope Actions []string Retries int - Credentials auth.CredentialStore + Credentials regauth.CredentialStore CredentialsFactory CredentialStoreFactory RequestModifiers []transport.RequestModifier Limiter *rate.Limiter @@ -134,7 +133,7 @@ func (c *Context) WithRateLimiter(limiter *rate.Limiter) *Context { return c } -func (c *Context) WithScopes(scopes ...auth.Scope) *Context { +func (c *Context) WithScopes(scopes ...regauth.Scope) *Context { c.Scopes = scopes return c } @@ -144,7 +143,7 @@ func (c *Context) WithActions(actions ...string) *Context { return c } -func (c *Context) WithCredentials(credentials auth.CredentialStore) *Context { +func (c *Context) WithCredentials(credentials regauth.CredentialStore) *Context { c.Credentials = credentials return c } @@ -331,7 +330,7 @@ func (c *Context) ping(registry url.URL, insecure bool, transport http.RoundTrip } defer resp.Body.Close() - versions := auth.APIVersions(resp, "Docker-Distribution-API-Version") + versions := regauth.APIVersions(resp, "Docker-Distribution-API-Version") if len(versions) == 0 { klog.V(5).Infof("Registry responded to v2 Docker endpoint, but has no header for Docker Distribution %s: %d, %#v", req.URL, resp.StatusCode, resp.Header) switch { @@ -368,7 +367,7 @@ func (s stringScope) String() string { return string(s) } // cachedTransport reuses an underlying transport for the given round tripper based // on the set of passed scopes. It will always return a transport that has at least the // provided scope list. -func (c *Context) cachedTransport(rt http.RoundTripper, host string, scopes []auth.Scope, ref imagereference.DockerImageReference) http.RoundTripper { +func (c *Context) cachedTransport(rt http.RoundTripper, host string, scopes []regauth.Scope, ref imagereference.DockerImageReference) http.RoundTripper { scopeNames := make(map[string]struct{}) for _, scope := range scopes { scopeNames[scope.String()] = struct{}{} @@ -388,9 +387,9 @@ func (c *Context) cachedTransport(rt http.RoundTripper, host string, scopes []au names = append(names, s) } sort.Strings(names) - scopes = make([]auth.Scope, 0, len(scopeNames)) + authScopes := make([]regauth.Scope, 0, len(scopeNames)) for _, s := range names { - scopes = append(scopes, stringScope(s)) + authScopes = append(authScopes, stringScope(s)) } creds := c.Credentials @@ -406,7 +405,7 @@ func (c *Context) cachedTransport(rt http.RoundTripper, host string, scopes []au auth.NewTokenHandlerWithOptions(auth.TokenHandlerOptions{ Transport: rt, Credentials: creds, - Scopes: scopes, + Scopes: authScopes, }), auth.NewBasicHandler(creds), ), @@ -422,13 +421,13 @@ func (c *Context) cachedTransport(rt http.RoundTripper, host string, scopes []au return t } -func (c *Context) scopes(repoName string) []auth.Scope { - scopes := make([]auth.Scope, 0, 1+len(c.Scopes)) +func (c *Context) scopes(repoName string) []regauth.Scope { + scopes := make([]regauth.Scope, 0, 1+len(c.Scopes)) scopes = append(scopes, c.Scopes...) 
if len(c.Actions) == 0 { - scopes = append(scopes, auth.RepositoryScope{Repository: repoName, Actions: []string{"pull"}}) + scopes = append(scopes, regauth.RepositoryScope{Repository: repoName, Actions: []string{"pull"}}) } else { - scopes = append(scopes, auth.RepositoryScope{Repository: repoName, Actions: c.Actions}) + scopes = append(scopes, regauth.RepositoryScope{Repository: repoName, Actions: c.Actions}) } return scopes } @@ -482,7 +481,7 @@ func isTemporaryHTTPError(err error) (time.Duration, bool) { case errcode.ErrorCodeTooManyRequests: return 2 * time.Second, true } - case *registryclient.UnexpectedHTTPResponseError: + case *clienterrors.UnexpectedHTTPResponseError: switch t.StatusCode { case http.StatusInternalServerError, http.StatusGatewayTimeout, http.StatusServiceUnavailable, http.StatusBadGateway: return 5 * time.Second, true @@ -723,20 +722,11 @@ func VerifyManifestIntegrity(manifest distribution.Manifest, dgst digest.Digest) // ContentDigestForManifest returns the digest in the provided algorithm of the supplied manifest's contents. func ContentDigestForManifest(manifest distribution.Manifest, algo digest.Algorithm) (digest.Digest, error) { - switch t := manifest.(type) { - case *schema1.SignedManifest: - // schema1 manifest digests are calculated from the payload - if len(t.Canonical) == 0 { - return "", fmt.Errorf("the schema1 manifest does not have a canonical representation") - } - return algo.FromBytes(t.Canonical), nil - default: - _, payload, err := manifest.Payload() - if err != nil { - return "", err - } - return algo.FromBytes(payload), nil + _, payload, err := manifest.Payload() + if err != nil { + return "", err } + return algo.FromBytes(payload), nil } // blobStoreVerifier wraps the blobs service and ensures that content retrieved by digest matches that digest. diff --git a/vendor/github.com/openshift/library-go/pkg/image/registryclient/client_mirrored.go b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/client_mirrored.go similarity index 98% rename from vendor/github.com/openshift/library-go/pkg/image/registryclient/client_mirrored.go rename to vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/client_mirrored.go index de8bf77735..404c4b1619 100644 --- a/vendor/github.com/openshift/library-go/pkg/image/registryclient/client_mirrored.go +++ b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/client_mirrored.go @@ -11,13 +11,13 @@ import ( "github.com/distribution/distribution/v3" "github.com/distribution/distribution/v3/registry/api/errcode" - "github.com/distribution/distribution/v3/registry/client" - "github.com/distribution/distribution/v3/registry/client/auth" + distributionreference "github.com/distribution/reference" "github.com/opencontainers/go-digest" - "github.com/openshift/library-go/pkg/image/reference" "k8s.io/klog/v2" - distributionreference "github.com/distribution/distribution/v3/reference" + "github.com/openshift/library-go/pkg/image/reference" + "github.com/openshift/library-go/pkg/image/registryclient/v2/clienterrors" + "github.com/openshift/library-go/pkg/image/registryclient/v2/internal/client/auth" ) // AlternateBlobSourceStrategy is consulted when a repository cannot be reached to find alternate @@ -196,8 +196,8 @@ func (r *blobMirroredRepository) attemptRepos(ctx context.Context, repos []refer // nothing was copied. 
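Editorial note, not part of the vendored diff: the client.go hunks above keep the Context builder methods but retype them against the new regauth package (WithCredentials, WithScopes, WithActions, WithRateLimiter). A minimal sketch of how a caller might wire them together, assuming a *registryclient.Context has already been constructed elsewhere and creds implements the v2 regauth.CredentialStore interface:

```go
package registryexample // hypothetical package name, illustration only

import (
	"golang.org/x/time/rate"

	registryclient "github.com/openshift/library-go/pkg/image/registryclient/v2"
	regauth "github.com/openshift/library-go/pkg/image/registryclient/v2/auth"
)

// configureContext applies the builder-style options shown in the diff.
// Context construction itself is outside the hunks above, so the value
// is taken as a parameter here.
func configureContext(c *registryclient.Context, creds regauth.CredentialStore) *registryclient.Context {
	return c.
		WithCredentials(creds).      // v2 regauth.CredentialStore, no longer the distribution client type
		WithActions("pull", "push"). // scopes() expands this to "repository:<name>:pull,push"
		WithRateLimiter(rate.NewLimiter(rate.Limit(10), 1))
}
```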
func isRequestError(err error) bool { var errorCode errcode.ErrorCode - responseError := &client.UnexpectedHTTPResponseError{} - statusError := &client.UnexpectedHTTPStatusError{} + responseError := &clienterrors.UnexpectedHTTPResponseError{} + statusError := &clienterrors.UnexpectedHTTPStatusError{} return errors.As(err, &errcode.Errors{}) || errors.As(err, &errcode.Error{}) || errors.As(err, &errorCode) || diff --git a/vendor/github.com/distribution/distribution/v3/registry/client/errors.go b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/clienterrors/errors.go similarity index 61% rename from vendor/github.com/distribution/distribution/v3/registry/client/errors.go rename to vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/clienterrors/errors.go index 8394ee0c20..fccec8f1a6 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/client/errors.go +++ b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/clienterrors/errors.go @@ -1,14 +1,15 @@ -package client +package clienterrors import ( "encoding/json" "errors" "fmt" "io" + "mime" "net/http" "github.com/distribution/distribution/v3/registry/api/errcode" - "github.com/distribution/distribution/v3/registry/client/auth/challenge" + "github.com/openshift/library-go/pkg/image/registryclient/v2/internal/client/auth/challenge" ) // ErrNoErrorsInBody is returned when an HTTP response body parses to an empty @@ -37,13 +38,35 @@ func (e *UnexpectedHTTPResponseError) Error() string { return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response)) } -func parseHTTPErrorResponse(statusCode int, r io.Reader) error { +func parseHTTPErrorResponse(resp *http.Response) error { var errors errcode.Errors - body, err := io.ReadAll(r) + body, err := io.ReadAll(resp.Body) if err != nil { return err } + statusCode := resp.StatusCode + + // A HEAD request for example validly does not contain any body, while + // still returning a JSON content-type. + if len(body) == 0 { + return makeError(statusCode, "") + } + + ctHeader := resp.Header.Get("Content-Type") + if ctHeader == "" { + return makeError(statusCode, string(body)) + } + + contentType, _, err := mime.ParseMediaType(ctHeader) + if err != nil { + return fmt.Errorf("failed parsing content-type: %w", err) + } + + if contentType != "application/json" && contentType != "application/vnd.api+json" { + return makeError(statusCode, string(body)) + } + // For backward compatibility, handle irregularly formatted // messages that contain a "details" field. 
var detailsErr struct { @@ -51,16 +74,7 @@ func parseHTTPErrorResponse(statusCode int, r io.Reader) error { } err = json.Unmarshal(body, &detailsErr) if err == nil && detailsErr.Details != "" { - switch statusCode { - case http.StatusUnauthorized: - return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details) - case http.StatusForbidden: - return errcode.ErrorCodeDenied.WithMessage(detailsErr.Details) - case http.StatusTooManyRequests: - return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details) - default: - return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details) - } + return makeError(statusCode, detailsErr.Details) } if err := json.Unmarshal(body, &errors); err != nil { @@ -84,6 +98,19 @@ func parseHTTPErrorResponse(statusCode int, r io.Reader) error { return errors } +func makeError(statusCode int, details string) error { + switch statusCode { + case http.StatusUnauthorized: + return errcode.ErrorCodeUnauthorized.WithMessage(details) + case http.StatusForbidden: + return errcode.ErrorCodeDenied.WithMessage(details) + case http.StatusTooManyRequests: + return errcode.ErrorCodeTooManyRequests.WithMessage(details) + default: + return errcode.ErrorCodeUnknown.WithMessage(details) + } +} + func makeErrorList(err error) []error { if errL, ok := err.(errcode.Errors); ok { return []error(errL) @@ -95,11 +122,16 @@ func mergeErrors(err1, err2 error) error { return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...)) } -// HandleErrorResponse returns error parsed from HTTP response for an -// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An -// UnexpectedHTTPStatusError returned for response code outside of expected -// range. -func HandleErrorResponse(resp *http.Response) error { +// HandleHTTPResponseError returns error parsed from HTTP response, if any. +// It returns nil if no error occurred (HTTP status 200-399), or an error +// for unsuccessful HTTP response codes (in the range 400 - 499 inclusive). +// If possible, it returns a typed error, but an UnexpectedHTTPStatusError +// is returned for response code outside the expected range (HTTP status < 200 +// and > 500). +func HandleHTTPResponseError(resp *http.Response) error { + if resp.StatusCode >= 200 && resp.StatusCode <= 399 { + return nil + } if resp.StatusCode >= 400 && resp.StatusCode < 500 { // Check for OAuth errors within the `WWW-Authenticate` header first // See https://tools.ietf.org/html/rfc6750#section-3 @@ -120,11 +152,10 @@ func HandleErrorResponse(resp *http.Response) error { } else { err.Message = err.Code.Message() } - - return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body)) + return mergeErrors(err, parseHTTPErrorResponse(resp)) } } - err := parseHTTPErrorResponse(resp.StatusCode, resp.Body) + err := parseHTTPErrorResponse(resp) if uErr, ok := err.(*UnexpectedHTTPResponseError); ok && resp.StatusCode == 401 { return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response) } @@ -133,8 +164,23 @@ func HandleErrorResponse(resp *http.Response) error { return &UnexpectedHTTPStatusError{Status: resp.Status} } +// HandleErrorResponse returns error parsed from HTTP response for an +// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An +// UnexpectedHTTPStatusError returned for response code outside of expected +// range. +// +// Deprecated: use [HandleHTTPResponseError] and check the error. 
+func HandleErrorResponse(resp *http.Response) error { + if resp.StatusCode >= 200 && resp.StatusCode <= 399 { + return &UnexpectedHTTPStatusError{Status: resp.Status} + } + return HandleHTTPResponseError(resp) +} + // SuccessStatus returns true if the argument is a successful HTTP response // code (in the range 200 - 399 inclusive). +// +// Deprecated: use [HandleHTTPResponseError] and check the error. func SuccessStatus(status int) bool { return status >= 200 && status <= 399 } diff --git a/vendor/github.com/openshift/library-go/pkg/image/registryclient/credentials.go b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/credentials.go similarity index 96% rename from vendor/github.com/openshift/library-go/pkg/image/registryclient/credentials.go rename to vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/credentials.go index 6ca8e49eda..ea5fe15082 100644 --- a/vendor/github.com/openshift/library-go/pkg/image/registryclient/credentials.go +++ b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/credentials.go @@ -4,7 +4,7 @@ import ( "net/url" "sync" - "github.com/distribution/distribution/v3/registry/client/auth" + "github.com/openshift/library-go/pkg/image/registryclient/v2/auth" ) var ( diff --git a/vendor/github.com/distribution/distribution/v3/registry/client/auth/api_version.go b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/internal/client/auth/api_version.go similarity index 100% rename from vendor/github.com/distribution/distribution/v3/registry/client/auth/api_version.go rename to vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/internal/client/auth/api_version.go diff --git a/vendor/github.com/distribution/distribution/v3/registry/client/auth/challenge/addr.go b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/internal/client/auth/challenge/addr.go similarity index 100% rename from vendor/github.com/distribution/distribution/v3/registry/client/auth/challenge/addr.go rename to vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/internal/client/auth/challenge/addr.go diff --git a/vendor/github.com/distribution/distribution/v3/registry/client/auth/challenge/authchallenge.go b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/internal/client/auth/challenge/authchallenge.go similarity index 99% rename from vendor/github.com/distribution/distribution/v3/registry/client/auth/challenge/authchallenge.go rename to vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/internal/client/auth/challenge/authchallenge.go index 3dae9538ea..4d26e56881 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/client/auth/challenge/authchallenge.go +++ b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/internal/client/auth/challenge/authchallenge.go @@ -8,6 +8,48 @@ import ( "sync" ) +// Octet types from RFC 2616. +type octetType byte + +var octetTypes [256]octetType + +const ( + isToken octetType = 1 << iota + isSpace +) + +func init() { + // OCTET = + // CHAR = + // CTL = + // CR = + // LF = + // SP = + // HT = + // <"> = + // CRLF = CR LF + // LWS = [CRLF] 1*( SP | HT ) + // TEXT = + // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> + // | "/" | "[" | "]" | "?" 
| "=" | "{" | "}" | SP | HT + // token = 1* + // qdtext = > + + for c := 0; c < 256; c++ { + var t octetType + isCtl := c <= 31 || c == 127 + isChar := 0 <= c && c <= 127 + isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) + if strings.ContainsRune(" \t\r\n", rune(c)) { + t |= isSpace + } + if isChar && !isCtl && !isSeparator { + t |= isToken + } + octetTypes[c] = t + } +} + // Challenge carries information from a WWW-Authenticate response header. // See RFC 2617. type Challenge struct { @@ -30,7 +72,7 @@ type Manager interface { // AddResponse adds the response to the challenge // manager. The challenges will be parsed out of - // the WWW-Authenicate headers and added to the + // the WWW-Authenticate headers and added to the // URL which was produced the response. If the // response was authorized, any challenges for the // endpoint will be cleared. @@ -86,48 +128,6 @@ func (m *simpleManager) AddResponse(resp *http.Response) error { return nil } -// Octet types from RFC 2616. -type octetType byte - -var octetTypes [256]octetType - -const ( - isToken octetType = 1 << iota - isSpace -) - -func init() { - // OCTET = - // CHAR = - // CTL = - // CR = - // LF = - // SP = - // HT = - // <"> = - // CRLF = CR LF - // LWS = [CRLF] 1*( SP | HT ) - // TEXT = - // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> - // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT - // token = 1* - // qdtext = > - - for c := 0; c < 256; c++ { - var t octetType - isCtl := c <= 31 || c == 127 - isChar := 0 <= c && c <= 127 - isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) - if strings.ContainsRune(" \t\r\n", rune(c)) { - t |= isSpace - } - if isChar && !isCtl && !isSeparator { - t |= isToken - } - octetTypes[c] = t - } -} - // ResponseChallenges returns a list of authorization challenges // for the given http Response. Challenges are only checked if // the response status code was a 401. diff --git a/vendor/github.com/distribution/distribution/v3/registry/client/auth/session.go b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/internal/client/auth/session.go similarity index 85% rename from vendor/github.com/distribution/distribution/v3/registry/client/auth/session.go rename to vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/internal/client/auth/session.go index fe2128316b..10ad587bde 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/client/auth/session.go +++ b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/internal/client/auth/session.go @@ -11,9 +11,10 @@ import ( "sync" "time" - "github.com/distribution/distribution/v3/registry/client" - "github.com/distribution/distribution/v3/registry/client/auth/challenge" - "github.com/distribution/distribution/v3/registry/client/transport" + "github.com/openshift/library-go/pkg/image/registryclient/v2/auth" + "github.com/openshift/library-go/pkg/image/registryclient/v2/clienterrors" + "github.com/openshift/library-go/pkg/image/registryclient/v2/internal/client/auth/challenge" + "github.com/openshift/library-go/pkg/image/registryclient/v2/transport" ) var ( @@ -29,9 +30,9 @@ var ( const defaultClientID = "registry-client" // AuthenticationHandler is an interface for authorizing a request from -// params from a "WWW-Authenicate" header for a single scheme. +// params from a "WWW-Authenticate" header for a single scheme. type AuthenticationHandler interface { - // Scheme returns the scheme as expected from the "WWW-Authenicate" header. 
+ // Scheme returns the scheme as expected from the "WWW-Authenticate" header. Scheme() string // AuthorizeRequest adds the authorization header to a request (if needed) @@ -40,21 +41,6 @@ type AuthenticationHandler interface { AuthorizeRequest(req *http.Request, params map[string]string) error } -// CredentialStore is an interface for getting credentials for -// a given URL -type CredentialStore interface { - // Basic returns basic auth for the given URL - Basic(*url.URL) (string, string) - - // RefreshToken returns a refresh token for the - // given URL and service - RefreshToken(*url.URL, string) string - - // SetRefreshToken sets the refresh token if none - // is provided for the given url and service - SetRefreshToken(realm *url.URL, service, token string) -} - // NewAuthorizer creates an authorizer which can handle multiple authentication // schemes. The handlers are tried in order, the higher priority authentication // methods should be first. The challengeMap holds a list of challenges for @@ -121,14 +107,14 @@ type clock interface { } type tokenHandler struct { - creds CredentialStore + creds auth.CredentialStore transport http.RoundTripper clock clock offlineAccess bool forceOAuth bool clientID string - scopes []Scope + scopes []auth.Scope tokenLock sync.Mutex tokenCache string @@ -137,32 +123,6 @@ type tokenHandler struct { logger Logger } -// Scope is a type which is serializable to a string -// using the allow scope grammar. -type Scope interface { - String() string -} - -// RepositoryScope represents a token scope for access -// to a repository. -type RepositoryScope struct { - Repository string - Class string - Actions []string -} - -// String returns the string representation of the repository -// using the scope grammar -func (rs RepositoryScope) String() string { - repoType := "repository" - // Keep existing format for image class to maintain backwards compatibility - // with authorization servers which do not support the expanded grammar. - if rs.Class != "" && rs.Class != "image" { - repoType = fmt.Sprintf("%s(%s)", repoType, rs.Class) - } - return fmt.Sprintf("%s:%s:%s", repoType, rs.Repository, strings.Join(rs.Actions, ",")) -} - // RegistryScope represents a token scope for access // to resources in the registry. type RegistryScope struct { @@ -191,12 +151,12 @@ func logDebugf(logger Logger, format string, args ...interface{}) { // TokenHandlerOptions is used to configure a new token handler type TokenHandlerOptions struct { Transport http.RoundTripper - Credentials CredentialStore + Credentials auth.CredentialStore OfflineAccess bool ForceOAuth bool ClientID string - Scopes []Scope + Scopes []auth.Scope Logger Logger } @@ -208,13 +168,13 @@ func (realClock) Now() time.Time { return time.Now() } // NewTokenHandler creates a new AuthenicationHandler which supports // fetching tokens from a remote token server. -func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope string, actions ...string) AuthenticationHandler { +func NewTokenHandler(transport http.RoundTripper, creds auth.CredentialStore, scope string, actions ...string) AuthenticationHandler { // Create options... 
return NewTokenHandlerWithOptions(TokenHandlerOptions{ Transport: transport, Credentials: creds, - Scopes: []Scope{ - RepositoryScope{ + Scopes: []auth.Scope{ + auth.RepositoryScope{ Repository: scope, Actions: actions, }, @@ -253,7 +213,7 @@ func (th *tokenHandler) Scheme() string { func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { var additionalScopes []string if fromParam := req.URL.Query().Get("from"); fromParam != "" { - additionalScopes = append(additionalScopes, RepositoryScope{ + additionalScopes = append(additionalScopes, auth.RepositoryScope{ Repository: fromParam, Actions: []string{"pull"}, }.String()) @@ -360,8 +320,7 @@ func (th *tokenHandler) fetchTokenWithOAuth(ctx context.Context, realm *url.URL, } defer resp.Body.Close() - if !client.SuccessStatus(resp.StatusCode) { - err := client.HandleErrorResponse(resp) + if err := clienterrors.HandleHTTPResponseError(resp); err != nil { return "", time.Time{}, err } @@ -443,8 +402,7 @@ func (th *tokenHandler) fetchTokenWithBasicAuth(ctx context.Context, realm *url. } defer resp.Body.Close() - if !client.SuccessStatus(resp.StatusCode) { - err := client.HandleErrorResponse(resp) + if err := clienterrors.HandleHTTPResponseError(resp); err != nil { return "", time.Time{}, err } @@ -512,12 +470,12 @@ func (th *tokenHandler) fetchToken(ctx context.Context, params map[string]string } type basicHandler struct { - creds CredentialStore + creds auth.CredentialStore } // NewBasicHandler creaters a new authentiation handler which adds // basic authentication credentials to a request. -func NewBasicHandler(creds CredentialStore) AuthenticationHandler { +func NewBasicHandler(creds auth.CredentialStore) AuthenticationHandler { return &basicHandler{ creds: creds, } diff --git a/vendor/github.com/distribution/distribution/v3/registry/client/blob_writer.go b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/internal/client/blob_writer.go similarity index 82% rename from vendor/github.com/distribution/distribution/v3/registry/client/blob_writer.go rename to vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/internal/client/blob_writer.go index 7b467d844a..8c97f910c1 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/client/blob_writer.go +++ b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/internal/client/blob_writer.go @@ -9,6 +9,9 @@ import ( "time" "github.com/distribution/distribution/v3" + v1 "github.com/opencontainers/image-spec/specs-go/v1" + + "github.com/openshift/library-go/pkg/image/registryclient/v2/clienterrors" ) type httpBlobUpload struct { @@ -33,7 +36,7 @@ func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error { if resp.StatusCode == http.StatusNotFound { return distribution.ErrBlobUploadUnknown } - return HandleErrorResponse(resp) + return clienterrors.HandleHTTPResponseError(resp) } func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) { @@ -43,13 +46,16 @@ func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) { } defer req.Body.Close() + req.Header.Set("Content-Type", "application/octet-stream") + resp, err := hbu.client.Do(req) if err != nil { return 0, err } + defer resp.Body.Close() - if !SuccessStatus(resp.StatusCode) { - return 0, hbu.handleErrorResponse(resp) + if err := hbu.handleErrorResponse(resp); err != nil { + return 0, err } hbu.uuid = resp.Header.Get("Docker-Upload-UUID") @@ -82,9 +88,10 @@ func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) { if 
err != nil { return 0, err } + defer resp.Body.Close() - if !SuccessStatus(resp.StatusCode) { - return 0, hbu.handleErrorResponse(resp) + if err := hbu.handleErrorResponse(resp); err != nil { + return 0, err } hbu.uuid = resp.Header.Get("Docker-Upload-UUID") @@ -116,11 +123,11 @@ func (hbu *httpBlobUpload) StartedAt() time.Time { return hbu.startedAt } -func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { +func (hbu *httpBlobUpload) Commit(ctx context.Context, desc v1.Descriptor) (v1.Descriptor, error) { // TODO(dmcgowan): Check if already finished, if so just fetch req, err := http.NewRequestWithContext(hbu.ctx, http.MethodPut, hbu.location, nil) if err != nil { - return distribution.Descriptor{}, err + return v1.Descriptor{}, err } values := req.URL.Query() @@ -129,12 +136,12 @@ func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descrip resp, err := hbu.client.Do(req) if err != nil { - return distribution.Descriptor{}, err + return v1.Descriptor{}, err } defer resp.Body.Close() - if !SuccessStatus(resp.StatusCode) { - return distribution.Descriptor{}, hbu.handleErrorResponse(resp) + if err := hbu.handleErrorResponse(resp); err != nil { + return v1.Descriptor{}, err } return hbu.statter.Stat(ctx, desc.Digest) @@ -151,7 +158,7 @@ func (hbu *httpBlobUpload) Cancel(ctx context.Context) error { } defer resp.Body.Close() - if resp.StatusCode == http.StatusNotFound || SuccessStatus(resp.StatusCode) { + if resp.StatusCode == http.StatusNotFound { return nil } return hbu.handleErrorResponse(resp) diff --git a/vendor/github.com/distribution/distribution/v3/registry/client/repository.go b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/internal/client/repository.go similarity index 80% rename from vendor/github.com/distribution/distribution/v3/registry/client/repository.go rename to vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/internal/client/repository.go index 31fc2e9237..971b943bfb 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/client/repository.go +++ b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/internal/client/repository.go @@ -15,12 +15,15 @@ import ( "time" "github.com/distribution/distribution/v3" - "github.com/distribution/distribution/v3/reference" v2 "github.com/distribution/distribution/v3/registry/api/v2" - "github.com/distribution/distribution/v3/registry/client/transport" "github.com/distribution/distribution/v3/registry/storage/cache" "github.com/distribution/distribution/v3/registry/storage/cache/memory" + "github.com/distribution/reference" "github.com/opencontainers/go-digest" + v1 "github.com/opencontainers/image-spec/specs-go/v1" + + "github.com/openshift/library-go/pkg/image/registryclient/v2/clienterrors" + "github.com/openshift/library-go/pkg/image/registryclient/v2/internal/client/transport" ) // Registry provides an interface for calling Repositories, which returns a catalog of repositories. @@ -89,8 +92,6 @@ type registry struct { // of the slice, starting at the value provided in 'last'. 
The number of entries will be returned along with io.EOF if there // are no more entries func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) { - var numFilled int - var returnErr error values := buildCatalogValues(len(entries), last) u, err := r.ub.BuildCatalogURL(values) @@ -108,28 +109,27 @@ func (r *registry) Repositories(ctx context.Context, entries []string, last stri } defer resp.Body.Close() - if SuccessStatus(resp.StatusCode) { - var ctlg struct { - Repositories []string `json:"repositories"` - } - decoder := json.NewDecoder(resp.Body) + if err := clienterrors.HandleHTTPResponseError(resp); err != nil { + return 0, err + } - if err := decoder.Decode(&ctlg); err != nil { - return 0, err - } + var ctlg struct { + Repositories []string `json:"repositories"` + } + decoder := json.NewDecoder(resp.Body) - copy(entries, ctlg.Repositories) - numFilled = len(ctlg.Repositories) + if err := decoder.Decode(&ctlg); err != nil { + return 0, err + } - link := resp.Header.Get("Link") - if link == "" { - returnErr = io.EOF - } - } else { - return 0, HandleErrorResponse(resp) + copy(entries, ctlg.Repositories) + numFilled := len(ctlg.Repositories) + + if resp.Header.Get("Link") == "" { + return numFilled, io.EOF } - return numFilled, returnErr + return numFilled, nil } // NewRepository creates a new Repository for the given repository name and base URL. @@ -200,18 +200,17 @@ type tags struct { // All returns all tags func (t *tags) All(ctx context.Context) ([]string, error) { - var tags []string - listURLStr, err := t.ub.BuildTagsURL(t.name) if err != nil { - return tags, err + return nil, err } listURL, err := url.Parse(listURLStr) if err != nil { - return tags, err + return nil, err } + var allTags []string for { req, err := http.NewRequestWithContext(ctx, http.MethodGet, listURL.String(), nil) if err != nil { @@ -219,47 +218,47 @@ func (t *tags) All(ctx context.Context) ([]string, error) { } resp, err := t.client.Do(req) if err != nil { - return tags, err + return allTags, err } defer resp.Body.Close() - if SuccessStatus(resp.StatusCode) { - b, err := io.ReadAll(resp.Body) - if err != nil { - return tags, err - } + if err := clienterrors.HandleHTTPResponseError(resp); err != nil { + return allTags, err + } - tagsResponse := struct { - Tags []string `json:"tags"` - }{} - if err := json.Unmarshal(b, &tagsResponse); err != nil { - return tags, err - } - tags = append(tags, tagsResponse.Tags...) - if link := resp.Header.Get("Link"); link != "" { - firsLink, _, _ := strings.Cut(link, ";") - linkURL, err := url.Parse(strings.Trim(firsLink, "<>")) - if err != nil { - return tags, err - } + b, err := io.ReadAll(resp.Body) + if err != nil { + return allTags, err + } - listURL = listURL.ResolveReference(linkURL) - } else { - return tags, nil + tagsResponse := struct { + Tags []string `json:"tags"` + }{} + if err := json.Unmarshal(b, &tagsResponse); err != nil { + return allTags, err + } + allTags = append(allTags, tagsResponse.Tags...) 
+ if link := resp.Header.Get("Link"); link != "" { + firsLink, _, _ := strings.Cut(link, ";") + linkURL, err := url.Parse(strings.Trim(firsLink, "<>")) + if err != nil { + return allTags, err } + + listURL = listURL.ResolveReference(linkURL) } else { - return tags, HandleErrorResponse(resp) + return allTags, nil } } } -func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) { - desc := distribution.Descriptor{} +func descriptorFromResponse(response *http.Response) (v1.Descriptor, error) { + desc := v1.Descriptor{} headers := response.Header ctHeader := headers.Get("Content-Type") if ctHeader == "" { - return distribution.Descriptor{}, errors.New("missing or empty Content-Type header") + return v1.Descriptor{}, errors.New("missing or empty Content-Type header") } desc.MediaType = ctHeader @@ -267,28 +266,28 @@ func descriptorFromResponse(response *http.Response) (distribution.Descriptor, e if digestHeader == "" { data, err := io.ReadAll(response.Body) if err != nil { - return distribution.Descriptor{}, err + return v1.Descriptor{}, err } _, desc, err := distribution.UnmarshalManifest(ctHeader, data) if err != nil { - return distribution.Descriptor{}, err + return v1.Descriptor{}, err } return desc, nil } dgst, err := digest.Parse(digestHeader) if err != nil { - return distribution.Descriptor{}, err + return v1.Descriptor{}, err } desc.Digest = dgst lengthHeader := headers.Get("Content-Length") if lengthHeader == "" { - return distribution.Descriptor{}, errors.New("missing or empty Content-Length header") + return v1.Descriptor{}, errors.New("missing or empty Content-Length header") } length, err := strconv.ParseInt(lengthHeader, 10, 64) if err != nil { - return distribution.Descriptor{}, err + return v1.Descriptor{}, err } desc.Size = length @@ -298,14 +297,14 @@ func descriptorFromResponse(response *http.Response) (distribution.Descriptor, e // Get issues a HEAD request for a Manifest against its named endpoint in order // to construct a descriptor for the tag. If the registry doesn't support HEADing // a manifest, fallback to GET. 
-func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { +func (t *tags) Get(ctx context.Context, tag string) (v1.Descriptor, error) { ref, err := reference.WithTag(t.name, tag) if err != nil { - return distribution.Descriptor{}, err + return v1.Descriptor{}, err } u, err := t.ub.BuildManifestURL(ref) if err != nil { - return distribution.Descriptor{}, err + return v1.Descriptor{}, err } newRequest := func(method string) (*http.Response, error) { @@ -323,7 +322,7 @@ func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, er resp, err := newRequest(http.MethodHead) if err != nil { - return distribution.Descriptor{}, err + return v1.Descriptor{}, err } defer resp.Body.Close() @@ -338,22 +337,22 @@ func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, er // - to get error details in case of a failure resp, err = newRequest(http.MethodGet) if err != nil { - return distribution.Descriptor{}, err + return v1.Descriptor{}, err } defer resp.Body.Close() if resp.StatusCode >= 200 && resp.StatusCode < 400 { return descriptorFromResponse(resp) } - return distribution.Descriptor{}, HandleErrorResponse(resp) + return v1.Descriptor{}, clienterrors.HandleHTTPResponseError(resp) } } -func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { +func (t *tags) Lookup(ctx context.Context, digest v1.Descriptor) ([]string, error) { panic("not implemented") } -func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { +func (t *tags) Tag(ctx context.Context, tag string, desc v1.Descriptor) error { panic("not implemented") } @@ -378,10 +377,7 @@ func (t *tags) Untag(ctx context.Context, tag string) error { } defer resp.Body.Close() - if SuccessStatus(resp.StatusCode) { - return nil - } - return HandleErrorResponse(resp) + return clienterrors.HandleHTTPResponseError(resp) } type manifests struct { @@ -409,13 +405,16 @@ func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, erro if err != nil { return false, err } + defer resp.Body.Close() - if SuccessStatus(resp.StatusCode) { - return true, nil - } else if resp.StatusCode == http.StatusNotFound { + if resp.StatusCode == http.StatusNotFound { return false, nil } - return false, HandleErrorResponse(resp) + + if err := clienterrors.HandleHTTPResponseError(resp); err != nil { + return false, err + } + return true, nil } // AddEtagToTag allows a client to supply an eTag to Get which will be @@ -516,25 +515,27 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis defer resp.Body.Close() if resp.StatusCode == http.StatusNotModified { return nil, distribution.ErrManifestNotModified - } else if SuccessStatus(resp.StatusCode) { - if contentDgst != nil { - dgst, err := digest.Parse(resp.Header.Get("Docker-Content-Digest")) - if err == nil { - *contentDgst = dgst - } - } - mt := resp.Header.Get("Content-Type") - body, err := io.ReadAll(resp.Body) - if err != nil { - return nil, err - } - m, _, err := distribution.UnmarshalManifest(mt, body) - if err != nil { - return nil, err + } + if err := clienterrors.HandleHTTPResponseError(resp); err != nil { + return nil, err + } + + if contentDgst != nil { + dgst, err := digest.Parse(resp.Header.Get("Docker-Content-Digest")) + if err == nil { + *contentDgst = dgst } - return m, nil } - return nil, HandleErrorResponse(resp) + mt := resp.Header.Get("Content-Type") + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + m, 
_, err := distribution.UnmarshalManifest(mt, body) + if err != nil { + return nil, err + } + return m, nil } // Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the @@ -593,17 +594,16 @@ func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options . } defer resp.Body.Close() - if SuccessStatus(resp.StatusCode) { - dgstHeader := resp.Header.Get("Docker-Content-Digest") - dgst, err := digest.Parse(dgstHeader) - if err != nil { - return "", err - } + if err := clienterrors.HandleHTTPResponseError(resp); err != nil { + return "", err + } - return dgst, nil + dgst, err := digest.Parse(resp.Header.Get("Docker-Content-Digest")) + if err != nil { + return "", err } - return "", HandleErrorResponse(resp) + return dgst, nil } func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error { @@ -626,10 +626,7 @@ func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error { } defer resp.Body.Close() - if SuccessStatus(resp.StatusCode) { - return nil - } - return HandleErrorResponse(resp) + return clienterrors.HandleHTTPResponseError(resp) } // todo(richardscothern): Restore interface and implementation with merge of #1050 @@ -660,7 +657,7 @@ func sanitizeLocation(location, base string) (string, error) { return baseURL.ResolveReference(locationURL).String(), nil } -func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { +func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (v1.Descriptor, error) { return bs.statter.Stat(ctx, dgst) } @@ -688,7 +685,7 @@ func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (io.ReadSeekClose if resp.StatusCode == http.StatusNotFound { return distribution.ErrBlobUnknown } - return HandleErrorResponse(resp) + return clienterrors.HandleHTTPResponseError(resp) }), nil } @@ -717,27 +714,25 @@ func (bs *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.R return err } -func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { +func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (v1.Descriptor, error) { writer, err := bs.Create(ctx) if err != nil { - return distribution.Descriptor{}, err + return v1.Descriptor{}, err } dgstr := digest.Canonical.Digester() n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash())) if err != nil { - return distribution.Descriptor{}, err + return v1.Descriptor{}, err } if n < int64(len(p)) { - return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p)) + return v1.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p)) } - desc := distribution.Descriptor{ + return writer.Commit(ctx, v1.Descriptor{ MediaType: mediaType, Size: int64(len(p)), Digest: dgstr.Digest(), - } - - return writer.Commit(ctx, desc) + }) } type optionFunc func(interface{}) error @@ -826,7 +821,7 @@ func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateO location: location, }, nil default: - return nil, HandleErrorResponse(resp) + return nil, clienterrors.HandleHTTPResponseError(resp) } } @@ -856,46 +851,49 @@ type blobStatter struct { client *http.Client } -func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { +func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (v1.Descriptor, error) { ref, err := reference.WithDigest(bs.name, dgst) if err != nil { - return distribution.Descriptor{}, err + 
return v1.Descriptor{}, err } u, err := bs.ub.BuildBlobURL(ref) if err != nil { - return distribution.Descriptor{}, err + return v1.Descriptor{}, err } req, err := http.NewRequestWithContext(ctx, http.MethodHead, u, nil) if err != nil { - return distribution.Descriptor{}, err + return v1.Descriptor{}, err } resp, err := bs.client.Do(req) if err != nil { - return distribution.Descriptor{}, err + return v1.Descriptor{}, err } defer resp.Body.Close() - if SuccessStatus(resp.StatusCode) { - lengthHeader := resp.Header.Get("Content-Length") - if lengthHeader == "" { - return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u) - } + if resp.StatusCode == http.StatusNotFound { + return v1.Descriptor{}, distribution.ErrBlobUnknown + } - length, err := strconv.ParseInt(lengthHeader, 10, 64) - if err != nil { - return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err) - } + if err := clienterrors.HandleHTTPResponseError(resp); err != nil { + return v1.Descriptor{}, err + } - return distribution.Descriptor{ - MediaType: resp.Header.Get("Content-Type"), - Size: length, - Digest: dgst, - }, nil - } else if resp.StatusCode == http.StatusNotFound { - return distribution.Descriptor{}, distribution.ErrBlobUnknown + lengthHeader := resp.Header.Get("Content-Length") + if lengthHeader == "" { + return v1.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u) + } + + length, err := strconv.ParseInt(lengthHeader, 10, 64) + if err != nil { + return v1.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err) } - return distribution.Descriptor{}, HandleErrorResponse(resp) + + return v1.Descriptor{ + MediaType: resp.Header.Get("Content-Type"), + Size: length, + Digest: dgst, + }, nil } func buildCatalogValues(maxEntries int, last string) url.Values { @@ -933,12 +931,9 @@ func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { } defer resp.Body.Close() - if SuccessStatus(resp.StatusCode) { - return nil - } - return HandleErrorResponse(resp) + return clienterrors.HandleHTTPResponseError(resp) } -func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { +func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc v1.Descriptor) error { return nil } diff --git a/vendor/github.com/distribution/distribution/v3/registry/client/transport/http_reader.go b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/internal/client/transport/http_reader.go similarity index 82% rename from vendor/github.com/distribution/distribution/v3/registry/client/transport/http_reader.go rename to vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/internal/client/transport/http_reader.go index 71671158ca..46577303e3 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/client/transport/http_reader.go +++ b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/internal/client/transport/http_reader.go @@ -1,13 +1,20 @@ package transport import ( + "compress/flate" + "compress/gzip" "context" "errors" "fmt" "io" + "math" "net/http" "regexp" "strconv" + "strings" + "unicode" + + "github.com/klauspost/compress/zstd" ) var ( @@ -19,11 +26,6 @@ var ( ErrWrongCodeForByteRange = errors.New("expected HTTP 206 from byte range request") ) -// ReadSeekCloser combines io.ReadSeeker with io.Closer. -// -// Deprecated: use [io.ReadSeekCloser]. 
-type ReadSeekCloser = io.ReadSeekCloser - // NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET // request. When seeking and starting a read from a non-zero offset // the a "Range" header will be added which sets the offset. @@ -163,7 +165,7 @@ func (hrs *HTTPReadSeeker) reset() { } } -func (hrs *HTTPReadSeeker) reader() (io.Reader, error) { +func (hrs *HTTPReadSeeker) reader() (_ io.Reader, retErr error) { if hrs.err != nil { return nil, hrs.err } @@ -184,10 +186,16 @@ func (hrs *HTTPReadSeeker) reader() (io.Reader, error) { // context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range")) } + req.Header.Add("Accept-Encoding", "zstd, gzip, deflate") resp, err := hrs.client.Do(req) if err != nil { return nil, err } + defer func() { + if retErr != nil { + _ = resp.Body.Close() + } + }() // Normally would use client.SuccessStatus, but that would be a cyclic // import @@ -233,6 +241,9 @@ func (hrs *HTTPReadSeeker) reader() (io.Reader, error) { return nil, fmt.Errorf("range in Content-Range stops before the end of the content: %s", contentRange) } + if size > math.MaxInt64 { + return nil, fmt.Errorf("Content-Range size: %d exceeds max allowed size", size) + } hrs.size = int64(size) } } else if resp.StatusCode == http.StatusOK { @@ -240,10 +251,41 @@ func (hrs *HTTPReadSeeker) reader() (io.Reader, error) { } else { hrs.size = -1 } - hrs.rc = resp.Body + + body := resp.Body + encoding := strings.FieldsFunc(resp.Header.Get("Content-Encoding"), func(r rune) bool { + return unicode.IsSpace(r) || r == ',' + }) + for i := len(encoding) - 1; i >= 0; i-- { + algorithm := strings.ToLower(encoding[i]) + switch algorithm { + case "zstd": + r, err := zstd.NewReader(body) + if err != nil { + return nil, err + } + body = r.IOReadCloser() + case "gzip": + body, err = gzip.NewReader(body) + if err != nil { + return nil, err + } + case "deflate": + body = flate.NewReader(body) + case "": + // no content-encoding applied, use raw body + default: + return nil, errors.New("unsupported Content-Encoding algorithm: " + algorithm) + } + } + + hrs.rc = body } else { - defer resp.Body.Close() if hrs.errorHandler != nil { + // Closing the body should be handled by the existing defer, + // but in case a custom "errHandler" is used that doesn't return + // an error, we close the body regardless. 
+ defer resp.Body.Close() return nil, hrs.errorHandler(resp) } return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status) diff --git a/vendor/github.com/distribution/distribution/v3/manifest/schema1/config_builder.go b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/manifest/schema1/config_builder.go similarity index 93% rename from vendor/github.com/distribution/distribution/v3/manifest/schema1/config_builder.go rename to vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/manifest/schema1/config_builder.go index 77fdfee996..e84c54c3d1 100644 --- a/vendor/github.com/distribution/distribution/v3/manifest/schema1/config_builder.go +++ b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/manifest/schema1/config_builder.go @@ -10,7 +10,7 @@ import ( "github.com/distribution/distribution/v3" "github.com/distribution/distribution/v3/manifest" - "github.com/distribution/distribution/v3/reference" + "github.com/distribution/reference" "github.com/docker/libtrust" "github.com/opencontainers/go-digest" ) @@ -28,9 +28,9 @@ var gzippedEmptyTar = []byte{ // gzippedEmptyTar const digestSHA256GzippedEmptyTar = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") -// configManifestBuilder is a type for constructing manifests from an image +// ConfigManifestBuilder is a type for constructing manifests from an image // configuration and generic descriptors. -type configManifestBuilder struct { +type ConfigManifestBuilder struct { // bs is a BlobService used to create empty layer tars in the // blob store if necessary. bs distribution.BlobService @@ -56,8 +56,8 @@ type configManifestBuilder struct { // Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015, // use Docker Image Manifest v2, Schema 2, or the OCI Image Specification v1. // This package should not be used for purposes other than backward compatibility. -func NewConfigManifestBuilder(bs distribution.BlobService, pk libtrust.PrivateKey, ref reference.Named, configJSON []byte) distribution.ManifestBuilder { - return &configManifestBuilder{ +func NewConfigManifestBuilder(bs distribution.BlobService, pk libtrust.PrivateKey, ref reference.Named, configJSON []byte) *ConfigManifestBuilder { + return &ConfigManifestBuilder{ bs: bs, pk: pk, configJSON: configJSON, @@ -69,7 +69,7 @@ func NewConfigManifestBuilder(bs distribution.BlobService, pk libtrust.PrivateKe // // Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015. // Use Docker Image Manifest v2, Schema 2, or the OCI Image Specification. -func (mb *configManifestBuilder) Build(ctx context.Context) (m distribution.Manifest, err error) { +func (mb *ConfigManifestBuilder) Build(ctx context.Context) (m distribution.Manifest, err error) { type imageRootFS struct { Type string `json:"type"` DiffIDs []diffID `json:"diff_ids,omitempty"` @@ -217,7 +217,7 @@ func (mb *configManifestBuilder) Build(ctx context.Context) (m distribution.Mani // emptyTar pushes a compressed empty tar to the blob store if one doesn't // already exist, and returns its blobsum. 
-func (mb *configManifestBuilder) emptyTar(ctx context.Context) (digest.Digest, error) { +func (mb *ConfigManifestBuilder) emptyTar(ctx context.Context) (digest.Digest, error) { if mb.emptyTarDigest != "" { // Already put an empty tar return mb.emptyTarDigest, nil @@ -249,9 +249,7 @@ func (mb *configManifestBuilder) emptyTar(ctx context.Context) (digest.Digest, e // // Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015. // Use Docker Image Manifest v2, Schema 2, or the OCI Image Specification. -func (mb *configManifestBuilder) AppendReference(d distribution.Describable) error { - descriptor := d.Descriptor() - +func (mb *ConfigManifestBuilder) AppendReference(descriptor distribution.Descriptor) error { if err := descriptor.Digest.Validate(); err != nil { return err } @@ -264,7 +262,7 @@ func (mb *configManifestBuilder) AppendReference(d distribution.Describable) err // // Deprecated: Docker Image Manifest v2, Schema 1 is deprecated since 2015. // Use Docker Image Manifest v2, Schema 2, or the OCI Image Specification. -func (mb *configManifestBuilder) References() []distribution.Descriptor { +func (mb *ConfigManifestBuilder) References() []distribution.Descriptor { return mb.descriptors } diff --git a/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/manifest/schema1/doc.go b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/manifest/schema1/doc.go new file mode 100644 index 0000000000..48e14aee37 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/manifest/schema1/doc.go @@ -0,0 +1,5 @@ +// This is a copy of github.com/distribution/distribution/v3/manifest/schema1. +// Only the necessary pieces were pulled in. +// Based on a41613ba3ab004719155db48e38897843dd7ece7 + +package schema1 diff --git a/vendor/github.com/distribution/distribution/v3/manifest/schema1/manifest.go b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/manifest/schema1/manifest.go similarity index 100% rename from vendor/github.com/distribution/distribution/v3/manifest/schema1/manifest.go rename to vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/manifest/schema1/manifest.go diff --git a/vendor/github.com/distribution/distribution/v3/manifest/schema1/sign.go b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/manifest/schema1/sign.go similarity index 100% rename from vendor/github.com/distribution/distribution/v3/manifest/schema1/sign.go rename to vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/manifest/schema1/sign.go diff --git a/vendor/github.com/distribution/distribution/v3/manifest/schema1/verify.go b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/manifest/schema1/verify.go similarity index 100% rename from vendor/github.com/distribution/distribution/v3/manifest/schema1/verify.go rename to vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/manifest/schema1/verify.go diff --git a/vendor/github.com/distribution/distribution/v3/registry/client/transport/transport.go b/vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/transport/transport.go similarity index 100% rename from vendor/github.com/distribution/distribution/v3/registry/client/transport/transport.go rename to vendor/github.com/openshift/library-go/pkg/image/registryclient/v2/transport/transport.go diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index b4607fe4d2..4067978a17
100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -345,8 +345,8 @@ func (p *TextParser) startLabelName() stateFn { } // Special summary/histogram treatment. Don't add 'quantile' and 'le' // labels to 'real' labels. - if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && - !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { + if (p.currentMF.GetType() != dto.MetricType_SUMMARY || p.currentLabelPair.GetName() != model.QuantileLabel) && + (p.currentMF.GetType() != dto.MetricType_HISTOGRAM || p.currentLabelPair.GetName() != model.BucketLabel) { p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair) } // Check for duplicate label names. diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go index bd3a39e3e1..460f554f29 100644 --- a/vendor/github.com/prometheus/common/model/alert.go +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -65,7 +65,7 @@ func (a *Alert) Resolved() bool { return a.ResolvedAt(time.Now()) } -// ResolvedAt returns true off the activity interval ended before +// ResolvedAt returns true iff the activity interval ended before // the given timestamp. func (a *Alert) ResolvedAt(ts time.Time) bool { if a.EndsAt.IsZero() { diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go index 73b7aa3e60..de83afe93e 100644 --- a/vendor/github.com/prometheus/common/model/labels.go +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -22,7 +22,7 @@ import ( ) const ( - // AlertNameLabel is the name of the label containing the an alert's name. + // AlertNameLabel is the name of the label containing the alert's name. AlertNameLabel = "alertname" // ExportedLabelPrefix is the prefix to prepend to the label names present in @@ -122,7 +122,8 @@ func (ln LabelName) IsValidLegacy() bool { return false } for i, b := range ln { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { + // TODO: Apply De Morgan's law. Make sure there are tests for this. + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { //nolint:staticcheck return false } } diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index 5766107cf9..a6b01755bd 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -27,13 +27,25 @@ import ( ) var ( - // NameValidationScheme determines the method of name validation to be used by - // all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8 - // mode in isolation from other components that don't support UTF-8 may result - // in bugs or other undefined behavior. This value can be set to - // LegacyValidation during startup if a binary is not UTF-8-aware binaries. To - // avoid need for locking, this value should be set once, ideally in an - // init(), before multiple goroutines are started. + // NameValidationScheme determines the global default method of the name + // validation to be used by all calls to IsValidMetricName() and LabelName + // IsValid(). + // + // Deprecated: This variable should not be used and might be removed in the + // far future. 
If you wish to stick to the legacy name validation use + // `IsValidLegacyMetricName()` and `LabelName.IsValidLegacy()` methods + // instead. This variable is here as an escape hatch for emergency cases, + // given the recent change from `LegacyValidation` to `UTF8Validation`, e.g., + // to delay UTF-8 migrations in time or aid in debugging unforeseen results of + // the change. In such a case, a temporary assignment to `LegacyValidation` + // value in the `init()` function in your main.go or so, could be considered. + // + // Historically we opted for a global variable for feature gating different + // validation schemes in operations that were not otherwise easily adjustable + // (e.g. Labels yaml unmarshaling). That could have been a mistake, a separate + // Labels structure or package might have been a better choice. Given the + // change was made and many upgraded the common already, we live this as-is + // with this warning and learning for the future. NameValidationScheme = UTF8Validation // NameEscapingScheme defines the default way that names will be escaped when @@ -50,7 +62,7 @@ var ( type ValidationScheme int const ( - // LegacyValidation is a setting that requirets that metric and label names + // LegacyValidation is a setting that requires that all metric and label names // conform to the original Prometheus character requirements described by // MetricNameRE and LabelNameRE. LegacyValidation ValidationScheme = iota diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go index 5727452c1e..fed9e87b91 100644 --- a/vendor/github.com/prometheus/common/model/time.go +++ b/vendor/github.com/prometheus/common/model/time.go @@ -201,6 +201,7 @@ var unitMap = map[string]struct { // ParseDuration parses a string into a time.Duration, assuming that a year // always has 365d, a week always has 7d, and a day always has 24h. +// Negative durations are not supported. func ParseDuration(s string) (Duration, error) { switch s { case "0": @@ -253,18 +254,36 @@ func ParseDuration(s string) (Duration, error) { return 0, errors.New("duration out of range") } } + return Duration(dur), nil } +// ParseDurationAllowNegative is like ParseDuration but also accepts negative durations. +func ParseDurationAllowNegative(s string) (Duration, error) { + if s == "" || s[0] != '-' { + return ParseDuration(s) + } + + d, err := ParseDuration(s[1:]) + + return -d, err +} + func (d Duration) String() string { var ( - ms = int64(time.Duration(d) / time.Millisecond) - r = "" + ms = int64(time.Duration(d) / time.Millisecond) + r = "" + sign = "" ) + if ms == 0 { return "0s" } + if ms < 0 { + sign, ms = "-", -ms + } + f := func(unit string, mult int64, exact bool) { if exact && ms%mult != 0 { return @@ -286,7 +305,7 @@ func (d Duration) String() string { f("s", 1000, false) f("ms", 1, false) - return r + return sign + r } // MarshalJSON implements the json.Marshaler interface. 
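For context on the prometheus/common bump above, a minimal sketch (not part of the patch; it assumes a caller imports github.com/prometheus/common/model directly) of how the new ParseDurationAllowNegative helper and the signed Duration.String output behave:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// ParseDuration still rejects a leading minus sign, so existing callers
	// keep their current validation behaviour.
	if _, err := model.ParseDuration("-5m"); err != nil {
		fmt.Println("ParseDuration:", err)
	}

	// ParseDurationAllowNegative strips the sign, parses the rest with
	// ParseDuration, and negates the result.
	d, err := model.ParseDurationAllowNegative("-1h30m")
	if err != nil {
		panic(err)
	}

	// Duration.String now carries the sign through, so this prints "-1h30m".
	fmt.Println(d)
}
```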
diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml index 126df9e67a..3c3bf910fd 100644 --- a/vendor/github.com/prometheus/procfs/.golangci.yml +++ b/vendor/github.com/prometheus/procfs/.golangci.yml @@ -1,22 +1,45 @@ ---- +version: "2" linters: enable: - - errcheck - - godot - - gosimple - - govet - - ineffassign - - misspell - - revive - - staticcheck - - testifylint - - unused - -linter-settings: - godot: - capital: true - exclude: - # Ignore "See: URL" - - 'See:' - misspell: - locale: US + - forbidigo + - godot + - misspell + - revive + - testifylint + settings: + forbidigo: + forbid: + - pattern: ^fmt\.Print.*$ + msg: Do not commit print statements. + godot: + exclude: + # Ignore "See: URL". + - 'See:' + capital: true + misspell: + locale: US + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + - goimports + settings: + goimports: + local-prefixes: + - github.com/prometheus/procfs + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index 1617292350..4de21512ff 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -61,7 +61,8 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_ SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.59.0 +GOLANGCI_LINT_VERSION ?= v2.1.5 +GOLANGCI_FMT_OPTS ?= # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) @@ -156,9 +157,13 @@ $(GOTEST_DIR): @mkdir -p $@ .PHONY: common-format -common-format: +common-format: $(GOLANGCI_LINT) @echo ">> formatting code" $(GO) fmt $(pkgs) +ifdef GOLANGCI_LINT + @echo ">> formatting code with golangci-lint" + $(GOLANGCI_LINT) fmt $(GOLANGCI_FMT_OPTS) +endif .PHONY: common-vet common-vet: @@ -248,8 +253,8 @@ $(PROMU): cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu rm -r $(PROMU_TMP) -.PHONY: proto -proto: +.PHONY: common-proto +common-proto: @echo ">> generating code from proto files" @./scripts/genproto.sh @@ -275,3 +280,9 @@ $(1)_precheck: exit 1; \ fi endef + +govulncheck: install-govulncheck + govulncheck ./... + +install-govulncheck: + command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md index 1224816c2a..0718239cf1 100644 --- a/vendor/github.com/prometheus/procfs/README.md +++ b/vendor/github.com/prometheus/procfs/README.md @@ -47,15 +47,15 @@ However, most of the API includes unit tests which can be run with `make test`. The procfs library includes a set of test fixtures which include many example files from the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file which is extracted automatically during testing. To add/update the test fixtures, first -ensure the `fixtures` directory is up to date by removing the existing directory and then -extracting the ttar file using `make fixtures/.unpacked` or just `make test`. 
+ensure the `testdata/fixtures` directory is up to date by removing the existing directory and then +extracting the ttar file using `make testdata/fixtures/.unpacked` or just `make test`. ```bash rm -rf testdata/fixtures make test ``` -Next, make the required changes to the extracted files in the `fixtures` directory. When +Next, make the required changes to the extracted files in the `testdata/fixtures` directory. When the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file based on the updated `fixtures` directory. And finally, verify the changes using `git diff testdata/fixtures.ttar`. diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go index cdcc8a7ccc..2e53344151 100644 --- a/vendor/github.com/prometheus/procfs/arp.go +++ b/vendor/github.com/prometheus/procfs/arp.go @@ -23,9 +23,9 @@ import ( // Learned from include/uapi/linux/if_arp.h. const ( - // completed entry (ha valid). + // Completed entry (ha valid). ATFComplete = 0x02 - // permanent entry. + // Permanent entry. ATFPermanent = 0x04 // Publish entry. ATFPublish = 0x08 diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go index 4980c875bf..9bdaccc7c8 100644 --- a/vendor/github.com/prometheus/procfs/fs.go +++ b/vendor/github.com/prometheus/procfs/fs.go @@ -24,8 +24,14 @@ type FS struct { isReal bool } -// DefaultMountPoint is the common mount point of the proc filesystem. -const DefaultMountPoint = fs.DefaultProcMountPoint +const ( + // DefaultMountPoint is the common mount point of the proc filesystem. + DefaultMountPoint = fs.DefaultProcMountPoint + + // SectorSize represents the size of a sector in bytes. + // It is specific to Linux block I/O operations. + SectorSize = 512 +) // NewDefaultFS returns a new proc FS mounted under the default proc mountPoint. // It will error if the mount point directory can't be read or is a file. diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go index 134767d69a..1b5bdbdf84 100644 --- a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go +++ b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go @@ -17,7 +17,7 @@ package procfs // isRealProc returns true on architectures that don't have a Type argument -// in their Statfs_t struct -func isRealProc(mountPoint string) (bool, error) { +// in their Statfs_t struct. 
+func isRealProc(_ string) (bool, error) { return true, nil } diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go index cf2e3eaa03..7db8633077 100644 --- a/vendor/github.com/prometheus/procfs/fscache.go +++ b/vendor/github.com/prometheus/procfs/fscache.go @@ -162,7 +162,7 @@ type Fscacheinfo struct { ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64 // Number of release reqs ignored due to in-progress store ReleaseRequestsIgnoredDueToInProgressStore uint64 - // Number of page stores cancelled due to release req + // Number of page stores canceled due to release req PageStoresCancelledByReleaseRequests uint64 VmscanWaiting uint64 // Number of times async ops added to pending queues @@ -171,11 +171,11 @@ type Fscacheinfo struct { OpsRunning uint64 // Number of times async ops queued for processing OpsEnqueued uint64 - // Number of async ops cancelled + // Number of async ops canceled OpsCancelled uint64 // Number of async ops rejected due to object lookup/create failure OpsRejected uint64 - // Number of async ops initialised + // Number of async ops initialized OpsInitialised uint64 // Number of async ops queued for deferred release OpsDeferred uint64 diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go index 3c18c7610e..3a43e83915 100644 --- a/vendor/github.com/prometheus/procfs/internal/fs/fs.go +++ b/vendor/github.com/prometheus/procfs/internal/fs/fs.go @@ -28,6 +28,9 @@ const ( // DefaultConfigfsMountPoint is the common mount point of the configfs. DefaultConfigfsMountPoint = "/sys/kernel/config" + + // DefaultSelinuxMountPoint is the common mount point of the selinuxfs. + DefaultSelinuxMountPoint = "/sys/fs/selinux" ) // FS represents a pseudo-filesystem, normally /proc or /sys, which provides an diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go index 14272dc788..5a7d2df06a 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/parse.go +++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -14,6 +14,7 @@ package util import ( + "errors" "os" "strconv" "strings" @@ -110,3 +111,16 @@ func ParseBool(b string) *bool { } return &truth } + +// ReadHexFromFile reads a file and attempts to parse a uint64 from a hexadecimal format 0xXX. +func ReadHexFromFile(path string) (uint64, error) { + data, err := os.ReadFile(path) + if err != nil { + return 0, err + } + hexString := strings.TrimSpace(string(data)) + if !strings.HasPrefix(hexString, "0x") { + return 0, errors.New("invalid format: hex string does not start with '0x'") + } + return strconv.ParseUint(hexString[2:], 16, 64) +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go index 1ab875ceec..d5404a6d72 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go +++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go @@ -20,6 +20,8 @@ package util import ( "bytes" "os" + "strconv" + "strings" "syscall" ) @@ -48,3 +50,21 @@ func SysReadFile(file string) (string, error) { return string(bytes.TrimSpace(b[:n])), nil } + +// SysReadUintFromFile reads a file using SysReadFile and attempts to parse a uint64 from it. 
+func SysReadUintFromFile(path string) (uint64, error) { + data, err := SysReadFile(path) + if err != nil { + return 0, err + } + return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) +} + +// SysReadIntFromFile reads a file using SysReadFile and attempts to parse a int64 from it. +func SysReadIntFromFile(path string) (int64, error) { + data, err := SysReadFile(path) + if err != nil { + return 0, err + } + return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64) +} diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go index 67a9d2b448..1fd4381b22 100644 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -123,13 +123,16 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { finish := float64(0) pct := float64(0) recovering := strings.Contains(lines[syncLineIdx], "recovery") + reshaping := strings.Contains(lines[syncLineIdx], "reshape") resyncing := strings.Contains(lines[syncLineIdx], "resync") checking := strings.Contains(lines[syncLineIdx], "check") // Append recovery and resyncing state info. - if recovering || resyncing || checking { + if recovering || resyncing || checking || reshaping { if recovering { state = "recovering" + } else if reshaping { + state = "reshaping" } else if checking { state = "checking" } else { diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go index 4b2c4050a3..937e1f9606 100644 --- a/vendor/github.com/prometheus/procfs/meminfo.go +++ b/vendor/github.com/prometheus/procfs/meminfo.go @@ -66,6 +66,10 @@ type Meminfo struct { // Memory which has been evicted from RAM, and is temporarily // on the disk SwapFree *uint64 + // Memory consumed by the zswap backend (compressed size) + Zswap *uint64 + // Amount of anonymous memory stored in zswap (original size) + Zswapped *uint64 // Memory which is waiting to get written back to the disk Dirty *uint64 // Memory which is actively being written back to the disk @@ -85,6 +89,8 @@ type Meminfo struct { // amount of memory dedicated to the lowest level of page // tables. PageTables *uint64 + // secondary page tables. 
+ SecPageTables *uint64 // NFS pages sent to the server, but not yet committed to // stable storage NFSUnstable *uint64 @@ -129,15 +135,18 @@ type Meminfo struct { Percpu *uint64 HardwareCorrupted *uint64 AnonHugePages *uint64 + FileHugePages *uint64 ShmemHugePages *uint64 ShmemPmdMapped *uint64 CmaTotal *uint64 CmaFree *uint64 + Unaccepted *uint64 HugePagesTotal *uint64 HugePagesFree *uint64 HugePagesRsvd *uint64 HugePagesSurp *uint64 Hugepagesize *uint64 + Hugetlb *uint64 DirectMap4k *uint64 DirectMap2M *uint64 DirectMap1G *uint64 @@ -161,6 +170,8 @@ type Meminfo struct { MlockedBytes *uint64 SwapTotalBytes *uint64 SwapFreeBytes *uint64 + ZswapBytes *uint64 + ZswappedBytes *uint64 DirtyBytes *uint64 WritebackBytes *uint64 AnonPagesBytes *uint64 @@ -171,6 +182,7 @@ type Meminfo struct { SUnreclaimBytes *uint64 KernelStackBytes *uint64 PageTablesBytes *uint64 + SecPageTablesBytes *uint64 NFSUnstableBytes *uint64 BounceBytes *uint64 WritebackTmpBytes *uint64 @@ -182,11 +194,14 @@ type Meminfo struct { PercpuBytes *uint64 HardwareCorruptedBytes *uint64 AnonHugePagesBytes *uint64 + FileHugePagesBytes *uint64 ShmemHugePagesBytes *uint64 ShmemPmdMappedBytes *uint64 CmaTotalBytes *uint64 CmaFreeBytes *uint64 + UnacceptedBytes *uint64 HugepagesizeBytes *uint64 + HugetlbBytes *uint64 DirectMap4kBytes *uint64 DirectMap2MBytes *uint64 DirectMap1GBytes *uint64 @@ -287,6 +302,12 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) { case "SwapFree:": m.SwapFree = &val m.SwapFreeBytes = &valBytes + case "Zswap:": + m.Zswap = &val + m.ZswapBytes = &valBytes + case "Zswapped:": + m.Zswapped = &val + m.ZswapBytes = &valBytes case "Dirty:": m.Dirty = &val m.DirtyBytes = &valBytes @@ -317,6 +338,9 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) { case "PageTables:": m.PageTables = &val m.PageTablesBytes = &valBytes + case "SecPageTables:": + m.SecPageTables = &val + m.SecPageTablesBytes = &valBytes case "NFS_Unstable:": m.NFSUnstable = &val m.NFSUnstableBytes = &valBytes @@ -350,6 +374,9 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) { case "AnonHugePages:": m.AnonHugePages = &val m.AnonHugePagesBytes = &valBytes + case "FileHugePages:": + m.FileHugePages = &val + m.FileHugePagesBytes = &valBytes case "ShmemHugePages:": m.ShmemHugePages = &val m.ShmemHugePagesBytes = &valBytes @@ -362,6 +389,9 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) { case "CmaFree:": m.CmaFree = &val m.CmaFreeBytes = &valBytes + case "Unaccepted:": + m.Unaccepted = &val + m.UnacceptedBytes = &valBytes case "HugePages_Total:": m.HugePagesTotal = &val case "HugePages_Free:": @@ -373,6 +403,9 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) { case "Hugepagesize:": m.Hugepagesize = &val m.HugepagesizeBytes = &valBytes + case "Hugetlb:": + m.Hugetlb = &val + m.HugetlbBytes = &valBytes case "DirectMap4k:": m.DirectMap4k = &val m.DirectMap4kBytes = &valBytes diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index 75a3b6c810..50caa73274 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -45,11 +45,11 @@ const ( fieldTransport11TCPLen = 13 fieldTransport11UDPLen = 10 - // kernel version >= 4.14 MaxLen + // Kernel version >= 4.14 MaxLen // See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393 fieldTransport11RDMAMaxLen = 28 - // kernel version <= 4.2 MinLen + // Kernel version <= 4.2 MinLen // See: 
https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331 fieldTransport11RDMAMinLen = 20 ) @@ -601,11 +601,12 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats switch statVersion { case statVersion10: var expectedLength int - if protocol == "tcp" { + switch protocol { + case "tcp": expectedLength = fieldTransport10TCPLen - } else if protocol == "udp" { + case "udp": expectedLength = fieldTransport10UDPLen - } else { + default: return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss) } if len(ss) != expectedLength { @@ -613,13 +614,14 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats } case statVersion11: var expectedLength int - if protocol == "tcp" { + switch protocol { + case "tcp": expectedLength = fieldTransport11TCPLen - } else if protocol == "udp" { + case "udp": expectedLength = fieldTransport11UDPLen - } else if protocol == "rdma" { + case "rdma": expectedLength = fieldTransport11RDMAMinLen - } else { + default: return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss) } if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) || @@ -655,11 +657,12 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats // For the udp RPC transport there is no connection count, connect idle time, // or idle time (fields #3, #4, and #5); all other fields are the same. So // we set them to 0 here. - if protocol == "udp" { + switch protocol { + case "udp": ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) - } else if protocol == "tcp" { + case "tcp": ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...) - } else if protocol == "rdma" { + case "rdma": ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...) } diff --git a/vendor/github.com/prometheus/procfs/net_dev_snmp6.go b/vendor/github.com/prometheus/procfs/net_dev_snmp6.go new file mode 100644 index 0000000000..f50b38e352 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_dev_snmp6.go @@ -0,0 +1,96 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "errors" + "io" + "os" + "strconv" + "strings" +) + +// NetDevSNMP6 is parsed from files in /proc/net/dev_snmp6/ or /proc//net/dev_snmp6/. +// The outer map's keys are interface names and the inner map's keys are stat names. +// +// If you'd like a total across all interfaces, please use the Snmp6() method of the Proc type. +type NetDevSNMP6 map[string]map[string]uint64 + +// Returns kernel/system statistics read from interface files within the /proc/net/dev_snmp6/ +// directory. 
+func (fs FS) NetDevSNMP6() (NetDevSNMP6, error) { + return newNetDevSNMP6(fs.proc.Path("net/dev_snmp6")) +} + +// Returns kernel/system statistics read from interface files within the /proc//net/dev_snmp6/ +// directory. +func (p Proc) NetDevSNMP6() (NetDevSNMP6, error) { + return newNetDevSNMP6(p.path("net/dev_snmp6")) +} + +// newNetDevSNMP6 creates a new NetDevSNMP6 from the contents of the given directory. +func newNetDevSNMP6(dir string) (NetDevSNMP6, error) { + netDevSNMP6 := make(NetDevSNMP6) + + // The net/dev_snmp6 folders contain one file per interface + ifaceFiles, err := os.ReadDir(dir) + if err != nil { + // On systems with IPv6 disabled, this directory won't exist. + // Do nothing. + if errors.Is(err, os.ErrNotExist) { + return netDevSNMP6, err + } + return netDevSNMP6, err + } + + for _, iFaceFile := range ifaceFiles { + f, err := os.Open(dir + "/" + iFaceFile.Name()) + if err != nil { + return netDevSNMP6, err + } + defer f.Close() + + netDevSNMP6[iFaceFile.Name()], err = parseNetDevSNMP6Stats(f) + if err != nil { + return netDevSNMP6, err + } + } + + return netDevSNMP6, nil +} + +func parseNetDevSNMP6Stats(r io.Reader) (map[string]uint64, error) { + m := make(map[string]uint64) + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + stat := strings.Fields(scanner.Text()) + if len(stat) < 2 { + continue + } + key, val := stat[0], stat[1] + + // Expect stat name to contain "6" or be "ifIndex" + if strings.Contains(key, "6") || key == "ifIndex" { + v, err := strconv.ParseUint(val, 10, 64) + if err != nil { + return m, err + } + + m[key] = v + } + } + return m, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go index b70f1fc7a4..19e3378f72 100644 --- a/vendor/github.com/prometheus/procfs/net_ip_socket.go +++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go @@ -25,7 +25,7 @@ import ( ) const ( - // readLimit is used by io.LimitReader while reading the content of the + // Maximum size limit used by io.LimitReader while reading the content of the // /proc/net/udp{,6} files. The number of lines inside such a file is dynamic // as each line represents a single used socket. // In theory, the number of available sockets is 65535 (2^16 - 1) per IP. @@ -50,12 +50,12 @@ type ( // UsedSockets shows the total number of parsed lines representing the // number of used sockets. UsedSockets uint64 - // Drops shows the total number of dropped packets of all UPD sockets. + // Drops shows the total number of dropped packets of all UDP sockets. Drops *uint64 } - // netIPSocketLine represents the fields parsed from a single line - // in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped. + // A single line parser for fields from /proc/net/{t,u}dp{,6}. + // Fields which are not used by IPSocket are skipped. // Drops is non-nil for udp{,6}, but nil for tcp{,6}. // For the proc file format details, see https://linux.die.net/man/5/proc. 
netIPSocketLine struct { diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go index b6c77b709f..8d4b1ac05b 100644 --- a/vendor/github.com/prometheus/procfs/net_protocols.go +++ b/vendor/github.com/prometheus/procfs/net_protocols.go @@ -115,22 +115,24 @@ func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, erro if err != nil { return nil, err } - if fields[4] == enabled { + switch fields[4] { + case enabled: line.Pressure = 1 - } else if fields[4] == disabled { + case disabled: line.Pressure = 0 - } else { + default: line.Pressure = -1 } line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64) if err != nil { return nil, err } - if fields[6] == enabled { + switch fields[6] { + case enabled: line.Slab = true - } else if fields[6] == disabled { + case disabled: line.Slab = false - } else { + default: return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name) } line.ModuleName = fields[7] @@ -168,11 +170,12 @@ func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) erro } for i := 0; i < len(capabilities); i++ { - if capabilities[i] == "y" { + switch capabilities[i] { + case "y": *capabilityFields[i] = true - } else if capabilities[i] == "n" { + case "n": *capabilityFields[i] = false - } else { + default: return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i) } } diff --git a/vendor/github.com/prometheus/procfs/net_tcp.go b/vendor/github.com/prometheus/procfs/net_tcp.go index 5277629557..0396d72015 100644 --- a/vendor/github.com/prometheus/procfs/net_tcp.go +++ b/vendor/github.com/prometheus/procfs/net_tcp.go @@ -25,24 +25,28 @@ type ( // NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams // read from /proc/net/tcp. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead. func (fs FS) NetTCP() (NetTCP, error) { return newNetTCP(fs.proc.Path("net/tcp")) } // NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams // read from /proc/net/tcp6. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead. func (fs FS) NetTCP6() (NetTCP, error) { return newNetTCP(fs.proc.Path("net/tcp6")) } // NetTCPSummary returns already computed statistics like the total queue lengths // for TCP datagrams read from /proc/net/tcp. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead. func (fs FS) NetTCPSummary() (*NetTCPSummary, error) { return newNetTCPSummary(fs.proc.Path("net/tcp")) } // NetTCP6Summary returns already computed statistics like the total queue lengths // for TCP datagrams read from /proc/net/tcp6. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead. 
func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) { return newNetTCPSummary(fs.proc.Path("net/tcp6")) } diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go index d868cebdaa..d7e0cacb4c 100644 --- a/vendor/github.com/prometheus/procfs/net_unix.go +++ b/vendor/github.com/prometheus/procfs/net_unix.go @@ -121,12 +121,12 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) { return &nu, nil } -func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) { +func (u *NetUNIX) parseLine(line string, hasInode bool, minFields int) (*NetUNIXLine, error) { fields := strings.Fields(line) l := len(fields) - if l < min { - return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, min, l) + if l < minFields { + return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, minFields, l) } // Field offsets are as follows: @@ -172,7 +172,7 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, } // Path field is optional. - if l > min { + if l > minFields { // Path occurs at either index 6 or 7 depending on whether inode is // already present. pathIdx := 7 diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go index 142796368f..368187fa88 100644 --- a/vendor/github.com/prometheus/procfs/proc.go +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -37,9 +37,9 @@ type Proc struct { type Procs []Proc var ( - ErrFileParse = errors.New("Error Parsing File") - ErrFileRead = errors.New("Error Reading File") - ErrMountPoint = errors.New("Error Accessing Mount point") + ErrFileParse = errors.New("error parsing file") + ErrFileRead = errors.New("error reading file") + ErrMountPoint = errors.New("error accessing mount point") ) func (p Procs) Len() int { return len(p) } @@ -79,7 +79,7 @@ func (fs FS) Self() (Proc, error) { if err != nil { return Proc{}, err } - pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1)) + pid, err := strconv.Atoi(strings.ReplaceAll(p, string(fs.proc), "")) if err != nil { return Proc{}, err } diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go index daeed7f571..4a64347c03 100644 --- a/vendor/github.com/prometheus/procfs/proc_cgroup.go +++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go @@ -24,7 +24,7 @@ import ( ) // Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a -// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource +// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. The v1 has one hierarchy per available resource // controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies // contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in // this hierarchy' (where==what path on the specific cgroupfs). 
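Similarly, a minimal sketch (not part of the patch) of the NetDevSNMP6 accessor vendored in from prometheus/procfs above; the interface and counter names printed here are only illustrative of whatever /proc/net/dev_snmp6/ exposes on the host:

```go
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	// NewDefaultFS points the helpers at the DefaultMountPoint ("/proc").
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		panic(err)
	}

	// NetDevSNMP6 parses one file per interface under /proc/net/dev_snmp6/
	// into a map keyed by interface name and then by stat name.
	stats, err := fs.NetDevSNMP6()
	if err != nil {
		panic(err)
	}

	for iface, counters := range stats {
		// "Ip6InReceives" is one of the IPv6 counters those files expose.
		fmt.Println(iface, "Ip6InReceives =", counters["Ip6InReceives"])
	}
}
```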
By prefixing this path with the mount point of diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go index 776f349717..d15b66ddb6 100644 --- a/vendor/github.com/prometheus/procfs/proc_io.go +++ b/vendor/github.com/prometheus/procfs/proc_io.go @@ -50,7 +50,7 @@ func (p Proc) IO() (ProcIO, error) { ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" + "read_bytes: %d\nwrite_bytes: %d\n" + - "cancelled_write_bytes: %d\n" + "cancelled_write_bytes: %d\n" //nolint:misspell _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go index 8e3ff4d794..4248c1716e 100644 --- a/vendor/github.com/prometheus/procfs/proc_netstat.go +++ b/vendor/github.com/prometheus/procfs/proc_netstat.go @@ -209,232 +209,232 @@ func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) { case "TcpExt": switch key { case "SyncookiesSent": - procNetstat.TcpExt.SyncookiesSent = &value + procNetstat.SyncookiesSent = &value case "SyncookiesRecv": - procNetstat.TcpExt.SyncookiesRecv = &value + procNetstat.SyncookiesRecv = &value case "SyncookiesFailed": - procNetstat.TcpExt.SyncookiesFailed = &value + procNetstat.SyncookiesFailed = &value case "EmbryonicRsts": - procNetstat.TcpExt.EmbryonicRsts = &value + procNetstat.EmbryonicRsts = &value case "PruneCalled": - procNetstat.TcpExt.PruneCalled = &value + procNetstat.PruneCalled = &value case "RcvPruned": - procNetstat.TcpExt.RcvPruned = &value + procNetstat.RcvPruned = &value case "OfoPruned": - procNetstat.TcpExt.OfoPruned = &value + procNetstat.OfoPruned = &value case "OutOfWindowIcmps": - procNetstat.TcpExt.OutOfWindowIcmps = &value + procNetstat.OutOfWindowIcmps = &value case "LockDroppedIcmps": - procNetstat.TcpExt.LockDroppedIcmps = &value + procNetstat.LockDroppedIcmps = &value case "ArpFilter": - procNetstat.TcpExt.ArpFilter = &value + procNetstat.ArpFilter = &value case "TW": - procNetstat.TcpExt.TW = &value + procNetstat.TW = &value case "TWRecycled": - procNetstat.TcpExt.TWRecycled = &value + procNetstat.TWRecycled = &value case "TWKilled": - procNetstat.TcpExt.TWKilled = &value + procNetstat.TWKilled = &value case "PAWSActive": - procNetstat.TcpExt.PAWSActive = &value + procNetstat.PAWSActive = &value case "PAWSEstab": - procNetstat.TcpExt.PAWSEstab = &value + procNetstat.PAWSEstab = &value case "DelayedACKs": - procNetstat.TcpExt.DelayedACKs = &value + procNetstat.DelayedACKs = &value case "DelayedACKLocked": - procNetstat.TcpExt.DelayedACKLocked = &value + procNetstat.DelayedACKLocked = &value case "DelayedACKLost": - procNetstat.TcpExt.DelayedACKLost = &value + procNetstat.DelayedACKLost = &value case "ListenOverflows": - procNetstat.TcpExt.ListenOverflows = &value + procNetstat.ListenOverflows = &value case "ListenDrops": - procNetstat.TcpExt.ListenDrops = &value + procNetstat.ListenDrops = &value case "TCPHPHits": - procNetstat.TcpExt.TCPHPHits = &value + procNetstat.TCPHPHits = &value case "TCPPureAcks": - procNetstat.TcpExt.TCPPureAcks = &value + procNetstat.TCPPureAcks = &value case "TCPHPAcks": - procNetstat.TcpExt.TCPHPAcks = &value + procNetstat.TCPHPAcks = &value case "TCPRenoRecovery": - procNetstat.TcpExt.TCPRenoRecovery = &value + procNetstat.TCPRenoRecovery = &value case "TCPSackRecovery": - procNetstat.TcpExt.TCPSackRecovery = &value + procNetstat.TCPSackRecovery = 
&value case "TCPSACKReneging": - procNetstat.TcpExt.TCPSACKReneging = &value + procNetstat.TCPSACKReneging = &value case "TCPSACKReorder": - procNetstat.TcpExt.TCPSACKReorder = &value + procNetstat.TCPSACKReorder = &value case "TCPRenoReorder": - procNetstat.TcpExt.TCPRenoReorder = &value + procNetstat.TCPRenoReorder = &value case "TCPTSReorder": - procNetstat.TcpExt.TCPTSReorder = &value + procNetstat.TCPTSReorder = &value case "TCPFullUndo": - procNetstat.TcpExt.TCPFullUndo = &value + procNetstat.TCPFullUndo = &value case "TCPPartialUndo": - procNetstat.TcpExt.TCPPartialUndo = &value + procNetstat.TCPPartialUndo = &value case "TCPDSACKUndo": - procNetstat.TcpExt.TCPDSACKUndo = &value + procNetstat.TCPDSACKUndo = &value case "TCPLossUndo": - procNetstat.TcpExt.TCPLossUndo = &value + procNetstat.TCPLossUndo = &value case "TCPLostRetransmit": - procNetstat.TcpExt.TCPLostRetransmit = &value + procNetstat.TCPLostRetransmit = &value case "TCPRenoFailures": - procNetstat.TcpExt.TCPRenoFailures = &value + procNetstat.TCPRenoFailures = &value case "TCPSackFailures": - procNetstat.TcpExt.TCPSackFailures = &value + procNetstat.TCPSackFailures = &value case "TCPLossFailures": - procNetstat.TcpExt.TCPLossFailures = &value + procNetstat.TCPLossFailures = &value case "TCPFastRetrans": - procNetstat.TcpExt.TCPFastRetrans = &value + procNetstat.TCPFastRetrans = &value case "TCPSlowStartRetrans": - procNetstat.TcpExt.TCPSlowStartRetrans = &value + procNetstat.TCPSlowStartRetrans = &value case "TCPTimeouts": - procNetstat.TcpExt.TCPTimeouts = &value + procNetstat.TCPTimeouts = &value case "TCPLossProbes": - procNetstat.TcpExt.TCPLossProbes = &value + procNetstat.TCPLossProbes = &value case "TCPLossProbeRecovery": - procNetstat.TcpExt.TCPLossProbeRecovery = &value + procNetstat.TCPLossProbeRecovery = &value case "TCPRenoRecoveryFail": - procNetstat.TcpExt.TCPRenoRecoveryFail = &value + procNetstat.TCPRenoRecoveryFail = &value case "TCPSackRecoveryFail": - procNetstat.TcpExt.TCPSackRecoveryFail = &value + procNetstat.TCPSackRecoveryFail = &value case "TCPRcvCollapsed": - procNetstat.TcpExt.TCPRcvCollapsed = &value + procNetstat.TCPRcvCollapsed = &value case "TCPDSACKOldSent": - procNetstat.TcpExt.TCPDSACKOldSent = &value + procNetstat.TCPDSACKOldSent = &value case "TCPDSACKOfoSent": - procNetstat.TcpExt.TCPDSACKOfoSent = &value + procNetstat.TCPDSACKOfoSent = &value case "TCPDSACKRecv": - procNetstat.TcpExt.TCPDSACKRecv = &value + procNetstat.TCPDSACKRecv = &value case "TCPDSACKOfoRecv": - procNetstat.TcpExt.TCPDSACKOfoRecv = &value + procNetstat.TCPDSACKOfoRecv = &value case "TCPAbortOnData": - procNetstat.TcpExt.TCPAbortOnData = &value + procNetstat.TCPAbortOnData = &value case "TCPAbortOnClose": - procNetstat.TcpExt.TCPAbortOnClose = &value + procNetstat.TCPAbortOnClose = &value case "TCPDeferAcceptDrop": - procNetstat.TcpExt.TCPDeferAcceptDrop = &value + procNetstat.TCPDeferAcceptDrop = &value case "IPReversePathFilter": - procNetstat.TcpExt.IPReversePathFilter = &value + procNetstat.IPReversePathFilter = &value case "TCPTimeWaitOverflow": - procNetstat.TcpExt.TCPTimeWaitOverflow = &value + procNetstat.TCPTimeWaitOverflow = &value case "TCPReqQFullDoCookies": - procNetstat.TcpExt.TCPReqQFullDoCookies = &value + procNetstat.TCPReqQFullDoCookies = &value case "TCPReqQFullDrop": - procNetstat.TcpExt.TCPReqQFullDrop = &value + procNetstat.TCPReqQFullDrop = &value case "TCPRetransFail": - procNetstat.TcpExt.TCPRetransFail = &value + procNetstat.TCPRetransFail = &value case "TCPRcvCoalesce": - 
procNetstat.TcpExt.TCPRcvCoalesce = &value + procNetstat.TCPRcvCoalesce = &value case "TCPRcvQDrop": - procNetstat.TcpExt.TCPRcvQDrop = &value + procNetstat.TCPRcvQDrop = &value case "TCPOFOQueue": - procNetstat.TcpExt.TCPOFOQueue = &value + procNetstat.TCPOFOQueue = &value case "TCPOFODrop": - procNetstat.TcpExt.TCPOFODrop = &value + procNetstat.TCPOFODrop = &value case "TCPOFOMerge": - procNetstat.TcpExt.TCPOFOMerge = &value + procNetstat.TCPOFOMerge = &value case "TCPChallengeACK": - procNetstat.TcpExt.TCPChallengeACK = &value + procNetstat.TCPChallengeACK = &value case "TCPSYNChallenge": - procNetstat.TcpExt.TCPSYNChallenge = &value + procNetstat.TCPSYNChallenge = &value case "TCPFastOpenActive": - procNetstat.TcpExt.TCPFastOpenActive = &value + procNetstat.TCPFastOpenActive = &value case "TCPFastOpenActiveFail": - procNetstat.TcpExt.TCPFastOpenActiveFail = &value + procNetstat.TCPFastOpenActiveFail = &value case "TCPFastOpenPassive": - procNetstat.TcpExt.TCPFastOpenPassive = &value + procNetstat.TCPFastOpenPassive = &value case "TCPFastOpenPassiveFail": - procNetstat.TcpExt.TCPFastOpenPassiveFail = &value + procNetstat.TCPFastOpenPassiveFail = &value case "TCPFastOpenListenOverflow": - procNetstat.TcpExt.TCPFastOpenListenOverflow = &value + procNetstat.TCPFastOpenListenOverflow = &value case "TCPFastOpenCookieReqd": - procNetstat.TcpExt.TCPFastOpenCookieReqd = &value + procNetstat.TCPFastOpenCookieReqd = &value case "TCPFastOpenBlackhole": - procNetstat.TcpExt.TCPFastOpenBlackhole = &value + procNetstat.TCPFastOpenBlackhole = &value case "TCPSpuriousRtxHostQueues": - procNetstat.TcpExt.TCPSpuriousRtxHostQueues = &value + procNetstat.TCPSpuriousRtxHostQueues = &value case "BusyPollRxPackets": - procNetstat.TcpExt.BusyPollRxPackets = &value + procNetstat.BusyPollRxPackets = &value case "TCPAutoCorking": - procNetstat.TcpExt.TCPAutoCorking = &value + procNetstat.TCPAutoCorking = &value case "TCPFromZeroWindowAdv": - procNetstat.TcpExt.TCPFromZeroWindowAdv = &value + procNetstat.TCPFromZeroWindowAdv = &value case "TCPToZeroWindowAdv": - procNetstat.TcpExt.TCPToZeroWindowAdv = &value + procNetstat.TCPToZeroWindowAdv = &value case "TCPWantZeroWindowAdv": - procNetstat.TcpExt.TCPWantZeroWindowAdv = &value + procNetstat.TCPWantZeroWindowAdv = &value case "TCPSynRetrans": - procNetstat.TcpExt.TCPSynRetrans = &value + procNetstat.TCPSynRetrans = &value case "TCPOrigDataSent": - procNetstat.TcpExt.TCPOrigDataSent = &value + procNetstat.TCPOrigDataSent = &value case "TCPHystartTrainDetect": - procNetstat.TcpExt.TCPHystartTrainDetect = &value + procNetstat.TCPHystartTrainDetect = &value case "TCPHystartTrainCwnd": - procNetstat.TcpExt.TCPHystartTrainCwnd = &value + procNetstat.TCPHystartTrainCwnd = &value case "TCPHystartDelayDetect": - procNetstat.TcpExt.TCPHystartDelayDetect = &value + procNetstat.TCPHystartDelayDetect = &value case "TCPHystartDelayCwnd": - procNetstat.TcpExt.TCPHystartDelayCwnd = &value + procNetstat.TCPHystartDelayCwnd = &value case "TCPACKSkippedSynRecv": - procNetstat.TcpExt.TCPACKSkippedSynRecv = &value + procNetstat.TCPACKSkippedSynRecv = &value case "TCPACKSkippedPAWS": - procNetstat.TcpExt.TCPACKSkippedPAWS = &value + procNetstat.TCPACKSkippedPAWS = &value case "TCPACKSkippedSeq": - procNetstat.TcpExt.TCPACKSkippedSeq = &value + procNetstat.TCPACKSkippedSeq = &value case "TCPACKSkippedFinWait2": - procNetstat.TcpExt.TCPACKSkippedFinWait2 = &value + procNetstat.TCPACKSkippedFinWait2 = &value case "TCPACKSkippedTimeWait": - procNetstat.TcpExt.TCPACKSkippedTimeWait = &value 
+ procNetstat.TCPACKSkippedTimeWait = &value case "TCPACKSkippedChallenge": - procNetstat.TcpExt.TCPACKSkippedChallenge = &value + procNetstat.TCPACKSkippedChallenge = &value case "TCPWinProbe": - procNetstat.TcpExt.TCPWinProbe = &value + procNetstat.TCPWinProbe = &value case "TCPKeepAlive": - procNetstat.TcpExt.TCPKeepAlive = &value + procNetstat.TCPKeepAlive = &value case "TCPMTUPFail": - procNetstat.TcpExt.TCPMTUPFail = &value + procNetstat.TCPMTUPFail = &value case "TCPMTUPSuccess": - procNetstat.TcpExt.TCPMTUPSuccess = &value + procNetstat.TCPMTUPSuccess = &value case "TCPWqueueTooBig": - procNetstat.TcpExt.TCPWqueueTooBig = &value + procNetstat.TCPWqueueTooBig = &value } case "IpExt": switch key { case "InNoRoutes": - procNetstat.IpExt.InNoRoutes = &value + procNetstat.InNoRoutes = &value case "InTruncatedPkts": - procNetstat.IpExt.InTruncatedPkts = &value + procNetstat.InTruncatedPkts = &value case "InMcastPkts": - procNetstat.IpExt.InMcastPkts = &value + procNetstat.InMcastPkts = &value case "OutMcastPkts": - procNetstat.IpExt.OutMcastPkts = &value + procNetstat.OutMcastPkts = &value case "InBcastPkts": - procNetstat.IpExt.InBcastPkts = &value + procNetstat.InBcastPkts = &value case "OutBcastPkts": - procNetstat.IpExt.OutBcastPkts = &value + procNetstat.OutBcastPkts = &value case "InOctets": - procNetstat.IpExt.InOctets = &value + procNetstat.InOctets = &value case "OutOctets": - procNetstat.IpExt.OutOctets = &value + procNetstat.OutOctets = &value case "InMcastOctets": - procNetstat.IpExt.InMcastOctets = &value + procNetstat.InMcastOctets = &value case "OutMcastOctets": - procNetstat.IpExt.OutMcastOctets = &value + procNetstat.OutMcastOctets = &value case "InBcastOctets": - procNetstat.IpExt.InBcastOctets = &value + procNetstat.InBcastOctets = &value case "OutBcastOctets": - procNetstat.IpExt.OutBcastOctets = &value + procNetstat.OutBcastOctets = &value case "InCsumErrors": - procNetstat.IpExt.InCsumErrors = &value + procNetstat.InCsumErrors = &value case "InNoECTPkts": - procNetstat.IpExt.InNoECTPkts = &value + procNetstat.InNoECTPkts = &value case "InECT1Pkts": - procNetstat.IpExt.InECT1Pkts = &value + procNetstat.InECT1Pkts = &value case "InECT0Pkts": - procNetstat.IpExt.InECT0Pkts = &value + procNetstat.InECT0Pkts = &value case "InCEPkts": - procNetstat.IpExt.InCEPkts = &value + procNetstat.InCEPkts = &value case "ReasmOverlaps": - procNetstat.IpExt.ReasmOverlaps = &value + procNetstat.ReasmOverlaps = &value } } } diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go index 09060e8208..9a297afcf8 100644 --- a/vendor/github.com/prometheus/procfs/proc_smaps.go +++ b/vendor/github.com/prometheus/procfs/proc_smaps.go @@ -19,7 +19,6 @@ package procfs import ( "bufio" "errors" - "fmt" "os" "regexp" "strconv" @@ -29,7 +28,7 @@ import ( ) var ( - // match the header line before each mapped zone in `/proc/pid/smaps`. + // Match the header line before each mapped zone in `/proc/pid/smaps`. 
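// A minimal sketch of the mechanism the counter rewrite above relies on,
// using hypothetical types rather than the vendored ones: ProcNetstat embeds
// its TcpExt and IpExt structs, so Go's embedded-field promotion lets the
// same field be addressed with or without the intermediate selector.
package main

import "fmt"

type tcpExt struct{ TCPSynRetrans *float64 }

type netstat struct {
	PID    int
	tcpExt // embedded: its fields are promoted onto netstat
}

func main() {
	v := 3.0
	var n netstat
	n.TCPSynRetrans = &v                                   // promoted form, as written in the diff above
	fmt.Println(n.tcpExt.TCPSynRetrans == n.TCPSynRetrans) // true: one field, two spellings
}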
procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`) ) @@ -117,7 +116,6 @@ func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) { func (s *ProcSMapsRollup) parseLine(line string) error { kv := strings.SplitN(line, ":", 2) if len(kv) != 2 { - fmt.Println(line) return errors.New("invalid net/dev line, missing colon") } diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go index b9d2cf642a..4bdc90b07e 100644 --- a/vendor/github.com/prometheus/procfs/proc_snmp.go +++ b/vendor/github.com/prometheus/procfs/proc_snmp.go @@ -173,138 +173,138 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) { case "Ip": switch key { case "Forwarding": - procSnmp.Ip.Forwarding = &value + procSnmp.Forwarding = &value case "DefaultTTL": - procSnmp.Ip.DefaultTTL = &value + procSnmp.DefaultTTL = &value case "InReceives": - procSnmp.Ip.InReceives = &value + procSnmp.InReceives = &value case "InHdrErrors": - procSnmp.Ip.InHdrErrors = &value + procSnmp.InHdrErrors = &value case "InAddrErrors": - procSnmp.Ip.InAddrErrors = &value + procSnmp.InAddrErrors = &value case "ForwDatagrams": - procSnmp.Ip.ForwDatagrams = &value + procSnmp.ForwDatagrams = &value case "InUnknownProtos": - procSnmp.Ip.InUnknownProtos = &value + procSnmp.InUnknownProtos = &value case "InDiscards": - procSnmp.Ip.InDiscards = &value + procSnmp.InDiscards = &value case "InDelivers": - procSnmp.Ip.InDelivers = &value + procSnmp.InDelivers = &value case "OutRequests": - procSnmp.Ip.OutRequests = &value + procSnmp.OutRequests = &value case "OutDiscards": - procSnmp.Ip.OutDiscards = &value + procSnmp.OutDiscards = &value case "OutNoRoutes": - procSnmp.Ip.OutNoRoutes = &value + procSnmp.OutNoRoutes = &value case "ReasmTimeout": - procSnmp.Ip.ReasmTimeout = &value + procSnmp.ReasmTimeout = &value case "ReasmReqds": - procSnmp.Ip.ReasmReqds = &value + procSnmp.ReasmReqds = &value case "ReasmOKs": - procSnmp.Ip.ReasmOKs = &value + procSnmp.ReasmOKs = &value case "ReasmFails": - procSnmp.Ip.ReasmFails = &value + procSnmp.ReasmFails = &value case "FragOKs": - procSnmp.Ip.FragOKs = &value + procSnmp.FragOKs = &value case "FragFails": - procSnmp.Ip.FragFails = &value + procSnmp.FragFails = &value case "FragCreates": - procSnmp.Ip.FragCreates = &value + procSnmp.FragCreates = &value } case "Icmp": switch key { case "InMsgs": - procSnmp.Icmp.InMsgs = &value + procSnmp.InMsgs = &value case "InErrors": procSnmp.Icmp.InErrors = &value case "InCsumErrors": procSnmp.Icmp.InCsumErrors = &value case "InDestUnreachs": - procSnmp.Icmp.InDestUnreachs = &value + procSnmp.InDestUnreachs = &value case "InTimeExcds": - procSnmp.Icmp.InTimeExcds = &value + procSnmp.InTimeExcds = &value case "InParmProbs": - procSnmp.Icmp.InParmProbs = &value + procSnmp.InParmProbs = &value case "InSrcQuenchs": - procSnmp.Icmp.InSrcQuenchs = &value + procSnmp.InSrcQuenchs = &value case "InRedirects": - procSnmp.Icmp.InRedirects = &value + procSnmp.InRedirects = &value case "InEchos": - procSnmp.Icmp.InEchos = &value + procSnmp.InEchos = &value case "InEchoReps": - procSnmp.Icmp.InEchoReps = &value + procSnmp.InEchoReps = &value case "InTimestamps": - procSnmp.Icmp.InTimestamps = &value + procSnmp.InTimestamps = &value case "InTimestampReps": - procSnmp.Icmp.InTimestampReps = &value + procSnmp.InTimestampReps = &value case "InAddrMasks": - procSnmp.Icmp.InAddrMasks = &value + procSnmp.InAddrMasks = &value case "InAddrMaskReps": - procSnmp.Icmp.InAddrMaskReps = &value + procSnmp.InAddrMaskReps = &value case 
"OutMsgs": - procSnmp.Icmp.OutMsgs = &value + procSnmp.OutMsgs = &value case "OutErrors": - procSnmp.Icmp.OutErrors = &value + procSnmp.OutErrors = &value case "OutDestUnreachs": - procSnmp.Icmp.OutDestUnreachs = &value + procSnmp.OutDestUnreachs = &value case "OutTimeExcds": - procSnmp.Icmp.OutTimeExcds = &value + procSnmp.OutTimeExcds = &value case "OutParmProbs": - procSnmp.Icmp.OutParmProbs = &value + procSnmp.OutParmProbs = &value case "OutSrcQuenchs": - procSnmp.Icmp.OutSrcQuenchs = &value + procSnmp.OutSrcQuenchs = &value case "OutRedirects": - procSnmp.Icmp.OutRedirects = &value + procSnmp.OutRedirects = &value case "OutEchos": - procSnmp.Icmp.OutEchos = &value + procSnmp.OutEchos = &value case "OutEchoReps": - procSnmp.Icmp.OutEchoReps = &value + procSnmp.OutEchoReps = &value case "OutTimestamps": - procSnmp.Icmp.OutTimestamps = &value + procSnmp.OutTimestamps = &value case "OutTimestampReps": - procSnmp.Icmp.OutTimestampReps = &value + procSnmp.OutTimestampReps = &value case "OutAddrMasks": - procSnmp.Icmp.OutAddrMasks = &value + procSnmp.OutAddrMasks = &value case "OutAddrMaskReps": - procSnmp.Icmp.OutAddrMaskReps = &value + procSnmp.OutAddrMaskReps = &value } case "IcmpMsg": switch key { case "InType3": - procSnmp.IcmpMsg.InType3 = &value + procSnmp.InType3 = &value case "OutType3": - procSnmp.IcmpMsg.OutType3 = &value + procSnmp.OutType3 = &value } case "Tcp": switch key { case "RtoAlgorithm": - procSnmp.Tcp.RtoAlgorithm = &value + procSnmp.RtoAlgorithm = &value case "RtoMin": - procSnmp.Tcp.RtoMin = &value + procSnmp.RtoMin = &value case "RtoMax": - procSnmp.Tcp.RtoMax = &value + procSnmp.RtoMax = &value case "MaxConn": - procSnmp.Tcp.MaxConn = &value + procSnmp.MaxConn = &value case "ActiveOpens": - procSnmp.Tcp.ActiveOpens = &value + procSnmp.ActiveOpens = &value case "PassiveOpens": - procSnmp.Tcp.PassiveOpens = &value + procSnmp.PassiveOpens = &value case "AttemptFails": - procSnmp.Tcp.AttemptFails = &value + procSnmp.AttemptFails = &value case "EstabResets": - procSnmp.Tcp.EstabResets = &value + procSnmp.EstabResets = &value case "CurrEstab": - procSnmp.Tcp.CurrEstab = &value + procSnmp.CurrEstab = &value case "InSegs": - procSnmp.Tcp.InSegs = &value + procSnmp.InSegs = &value case "OutSegs": - procSnmp.Tcp.OutSegs = &value + procSnmp.OutSegs = &value case "RetransSegs": - procSnmp.Tcp.RetransSegs = &value + procSnmp.RetransSegs = &value case "InErrs": - procSnmp.Tcp.InErrs = &value + procSnmp.InErrs = &value case "OutRsts": - procSnmp.Tcp.OutRsts = &value + procSnmp.OutRsts = &value case "InCsumErrors": procSnmp.Tcp.InCsumErrors = &value } diff --git a/vendor/github.com/prometheus/procfs/proc_snmp6.go b/vendor/github.com/prometheus/procfs/proc_snmp6.go index 3059cc6a13..fb7fd3995b 100644 --- a/vendor/github.com/prometheus/procfs/proc_snmp6.go +++ b/vendor/github.com/prometheus/procfs/proc_snmp6.go @@ -182,161 +182,161 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) { case "Ip6": switch key { case "InReceives": - procSnmp6.Ip6.InReceives = &value + procSnmp6.InReceives = &value case "InHdrErrors": - procSnmp6.Ip6.InHdrErrors = &value + procSnmp6.InHdrErrors = &value case "InTooBigErrors": - procSnmp6.Ip6.InTooBigErrors = &value + procSnmp6.InTooBigErrors = &value case "InNoRoutes": - procSnmp6.Ip6.InNoRoutes = &value + procSnmp6.InNoRoutes = &value case "InAddrErrors": - procSnmp6.Ip6.InAddrErrors = &value + procSnmp6.InAddrErrors = &value case "InUnknownProtos": - procSnmp6.Ip6.InUnknownProtos = &value + procSnmp6.InUnknownProtos = &value case "InTruncatedPkts": 
- procSnmp6.Ip6.InTruncatedPkts = &value + procSnmp6.InTruncatedPkts = &value case "InDiscards": - procSnmp6.Ip6.InDiscards = &value + procSnmp6.InDiscards = &value case "InDelivers": - procSnmp6.Ip6.InDelivers = &value + procSnmp6.InDelivers = &value case "OutForwDatagrams": - procSnmp6.Ip6.OutForwDatagrams = &value + procSnmp6.OutForwDatagrams = &value case "OutRequests": - procSnmp6.Ip6.OutRequests = &value + procSnmp6.OutRequests = &value case "OutDiscards": - procSnmp6.Ip6.OutDiscards = &value + procSnmp6.OutDiscards = &value case "OutNoRoutes": - procSnmp6.Ip6.OutNoRoutes = &value + procSnmp6.OutNoRoutes = &value case "ReasmTimeout": - procSnmp6.Ip6.ReasmTimeout = &value + procSnmp6.ReasmTimeout = &value case "ReasmReqds": - procSnmp6.Ip6.ReasmReqds = &value + procSnmp6.ReasmReqds = &value case "ReasmOKs": - procSnmp6.Ip6.ReasmOKs = &value + procSnmp6.ReasmOKs = &value case "ReasmFails": - procSnmp6.Ip6.ReasmFails = &value + procSnmp6.ReasmFails = &value case "FragOKs": - procSnmp6.Ip6.FragOKs = &value + procSnmp6.FragOKs = &value case "FragFails": - procSnmp6.Ip6.FragFails = &value + procSnmp6.FragFails = &value case "FragCreates": - procSnmp6.Ip6.FragCreates = &value + procSnmp6.FragCreates = &value case "InMcastPkts": - procSnmp6.Ip6.InMcastPkts = &value + procSnmp6.InMcastPkts = &value case "OutMcastPkts": - procSnmp6.Ip6.OutMcastPkts = &value + procSnmp6.OutMcastPkts = &value case "InOctets": - procSnmp6.Ip6.InOctets = &value + procSnmp6.InOctets = &value case "OutOctets": - procSnmp6.Ip6.OutOctets = &value + procSnmp6.OutOctets = &value case "InMcastOctets": - procSnmp6.Ip6.InMcastOctets = &value + procSnmp6.InMcastOctets = &value case "OutMcastOctets": - procSnmp6.Ip6.OutMcastOctets = &value + procSnmp6.OutMcastOctets = &value case "InBcastOctets": - procSnmp6.Ip6.InBcastOctets = &value + procSnmp6.InBcastOctets = &value case "OutBcastOctets": - procSnmp6.Ip6.OutBcastOctets = &value + procSnmp6.OutBcastOctets = &value case "InNoECTPkts": - procSnmp6.Ip6.InNoECTPkts = &value + procSnmp6.InNoECTPkts = &value case "InECT1Pkts": - procSnmp6.Ip6.InECT1Pkts = &value + procSnmp6.InECT1Pkts = &value case "InECT0Pkts": - procSnmp6.Ip6.InECT0Pkts = &value + procSnmp6.InECT0Pkts = &value case "InCEPkts": - procSnmp6.Ip6.InCEPkts = &value + procSnmp6.InCEPkts = &value } case "Icmp6": switch key { case "InMsgs": - procSnmp6.Icmp6.InMsgs = &value + procSnmp6.InMsgs = &value case "InErrors": procSnmp6.Icmp6.InErrors = &value case "OutMsgs": - procSnmp6.Icmp6.OutMsgs = &value + procSnmp6.OutMsgs = &value case "OutErrors": - procSnmp6.Icmp6.OutErrors = &value + procSnmp6.OutErrors = &value case "InCsumErrors": procSnmp6.Icmp6.InCsumErrors = &value case "InDestUnreachs": - procSnmp6.Icmp6.InDestUnreachs = &value + procSnmp6.InDestUnreachs = &value case "InPktTooBigs": - procSnmp6.Icmp6.InPktTooBigs = &value + procSnmp6.InPktTooBigs = &value case "InTimeExcds": - procSnmp6.Icmp6.InTimeExcds = &value + procSnmp6.InTimeExcds = &value case "InParmProblems": - procSnmp6.Icmp6.InParmProblems = &value + procSnmp6.InParmProblems = &value case "InEchos": - procSnmp6.Icmp6.InEchos = &value + procSnmp6.InEchos = &value case "InEchoReplies": - procSnmp6.Icmp6.InEchoReplies = &value + procSnmp6.InEchoReplies = &value case "InGroupMembQueries": - procSnmp6.Icmp6.InGroupMembQueries = &value + procSnmp6.InGroupMembQueries = &value case "InGroupMembResponses": - procSnmp6.Icmp6.InGroupMembResponses = &value + procSnmp6.InGroupMembResponses = &value case "InGroupMembReductions": - 
procSnmp6.Icmp6.InGroupMembReductions = &value + procSnmp6.InGroupMembReductions = &value case "InRouterSolicits": - procSnmp6.Icmp6.InRouterSolicits = &value + procSnmp6.InRouterSolicits = &value case "InRouterAdvertisements": - procSnmp6.Icmp6.InRouterAdvertisements = &value + procSnmp6.InRouterAdvertisements = &value case "InNeighborSolicits": - procSnmp6.Icmp6.InNeighborSolicits = &value + procSnmp6.InNeighborSolicits = &value case "InNeighborAdvertisements": - procSnmp6.Icmp6.InNeighborAdvertisements = &value + procSnmp6.InNeighborAdvertisements = &value case "InRedirects": - procSnmp6.Icmp6.InRedirects = &value + procSnmp6.InRedirects = &value case "InMLDv2Reports": - procSnmp6.Icmp6.InMLDv2Reports = &value + procSnmp6.InMLDv2Reports = &value case "OutDestUnreachs": - procSnmp6.Icmp6.OutDestUnreachs = &value + procSnmp6.OutDestUnreachs = &value case "OutPktTooBigs": - procSnmp6.Icmp6.OutPktTooBigs = &value + procSnmp6.OutPktTooBigs = &value case "OutTimeExcds": - procSnmp6.Icmp6.OutTimeExcds = &value + procSnmp6.OutTimeExcds = &value case "OutParmProblems": - procSnmp6.Icmp6.OutParmProblems = &value + procSnmp6.OutParmProblems = &value case "OutEchos": - procSnmp6.Icmp6.OutEchos = &value + procSnmp6.OutEchos = &value case "OutEchoReplies": - procSnmp6.Icmp6.OutEchoReplies = &value + procSnmp6.OutEchoReplies = &value case "OutGroupMembQueries": - procSnmp6.Icmp6.OutGroupMembQueries = &value + procSnmp6.OutGroupMembQueries = &value case "OutGroupMembResponses": - procSnmp6.Icmp6.OutGroupMembResponses = &value + procSnmp6.OutGroupMembResponses = &value case "OutGroupMembReductions": - procSnmp6.Icmp6.OutGroupMembReductions = &value + procSnmp6.OutGroupMembReductions = &value case "OutRouterSolicits": - procSnmp6.Icmp6.OutRouterSolicits = &value + procSnmp6.OutRouterSolicits = &value case "OutRouterAdvertisements": - procSnmp6.Icmp6.OutRouterAdvertisements = &value + procSnmp6.OutRouterAdvertisements = &value case "OutNeighborSolicits": - procSnmp6.Icmp6.OutNeighborSolicits = &value + procSnmp6.OutNeighborSolicits = &value case "OutNeighborAdvertisements": - procSnmp6.Icmp6.OutNeighborAdvertisements = &value + procSnmp6.OutNeighborAdvertisements = &value case "OutRedirects": - procSnmp6.Icmp6.OutRedirects = &value + procSnmp6.OutRedirects = &value case "OutMLDv2Reports": - procSnmp6.Icmp6.OutMLDv2Reports = &value + procSnmp6.OutMLDv2Reports = &value case "InType1": - procSnmp6.Icmp6.InType1 = &value + procSnmp6.InType1 = &value case "InType134": - procSnmp6.Icmp6.InType134 = &value + procSnmp6.InType134 = &value case "InType135": - procSnmp6.Icmp6.InType135 = &value + procSnmp6.InType135 = &value case "InType136": - procSnmp6.Icmp6.InType136 = &value + procSnmp6.InType136 = &value case "InType143": - procSnmp6.Icmp6.InType143 = &value + procSnmp6.InType143 = &value case "OutType133": - procSnmp6.Icmp6.OutType133 = &value + procSnmp6.OutType133 = &value case "OutType135": - procSnmp6.Icmp6.OutType135 = &value + procSnmp6.OutType135 = &value case "OutType136": - procSnmp6.Icmp6.OutType136 = &value + procSnmp6.OutType136 = &value case "OutType143": - procSnmp6.Icmp6.OutType143 = &value + procSnmp6.OutType143 = &value } case "Udp6": switch key { @@ -355,7 +355,7 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) { case "InCsumErrors": procSnmp6.Udp6.InCsumErrors = &value case "IgnoredMulti": - procSnmp6.Udp6.IgnoredMulti = &value + procSnmp6.IgnoredMulti = &value } case "UdpLite6": switch key { diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go 
b/vendor/github.com/prometheus/procfs/proc_stat.go index 06a8d931c9..3328556bdc 100644 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -101,6 +101,12 @@ type ProcStat struct { RSS int // Soft limit in bytes on the rss of the process. RSSLimit uint64 + // The address above which program text can run. + StartCode uint64 + // The address below which program text can run. + EndCode uint64 + // The address of the start (i.e., bottom) of the stack. + StartStack uint64 // CPU number last executed on. Processor uint // Real-time scheduling priority, a number in the range 1 to 99 for processes @@ -177,9 +183,9 @@ func (p Proc) Stat() (ProcStat, error) { &s.VSize, &s.RSS, &s.RSSLimit, - &ignoreUint64, - &ignoreUint64, - &ignoreUint64, + &s.StartCode, + &s.EndCode, + &s.StartStack, &ignoreUint64, &ignoreUint64, &ignoreUint64, diff --git a/vendor/github.com/prometheus/procfs/proc_statm.go b/vendor/github.com/prometheus/procfs/proc_statm.go new file mode 100644 index 0000000000..ed57984243 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_statm.go @@ -0,0 +1,116 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "os" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// - https://man7.org/linux/man-pages/man5/proc_pid_statm.5.html + +// ProcStatm Provides memory usage information for a process, measured in memory pages. +// Read from /proc/[pid]/statm. +type ProcStatm struct { + // The process ID. + PID int + // total program size (same as VmSize in status) + Size uint64 + // resident set size (same as VmRSS in status) + Resident uint64 + // number of resident shared pages (i.e., backed by a file) + Shared uint64 + // text (code) + Text uint64 + // library (unused since Linux 2.6; always 0) + Lib uint64 + // data + stack + Data uint64 + // dirty pages (unused since Linux 2.6; always 0) + Dt uint64 +} + +// NewStatm returns the current status information of the process. +// Deprecated: Use p.Statm() instead. +func (p Proc) NewStatm() (ProcStatm, error) { + return p.Statm() +} + +// Statm returns the current memory usage information of the process. +func (p Proc) Statm() (ProcStatm, error) { + data, err := util.ReadFileNoStat(p.path("statm")) + if err != nil { + return ProcStatm{}, err + } + + statmSlice, err := parseStatm(data) + if err != nil { + return ProcStatm{}, err + } + + procStatm := ProcStatm{ + PID: p.PID, + Size: statmSlice[0], + Resident: statmSlice[1], + Shared: statmSlice[2], + Text: statmSlice[3], + Lib: statmSlice[4], + Data: statmSlice[5], + Dt: statmSlice[6], + } + + return procStatm, nil +} + +// parseStatm return /proc/[pid]/statm data to uint64 slice. 
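// A hedged usage sketch for the additions above. It assumes the package's
// existing entry points procfs.NewDefaultFS and FS.Self (not part of this
// diff): Statm() is the new reader for /proc/self/statm, and ProcStat now
// also carries StartCode, EndCode and StartStack from /proc/[pid]/stat.
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		panic(err)
	}
	p, err := fs.Self()
	if err != nil {
		panic(err)
	}

	statm, err := p.Statm() // new in this update
	if err != nil {
		panic(err)
	}
	fmt.Println("resident bytes:", statm.ResidentBytes())

	stat, err := p.Stat()
	if err != nil {
		panic(err)
	}
	fmt.Printf("text %#x-%#x, stack top %#x\n", stat.StartCode, stat.EndCode, stat.StartStack)
}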
+func parseStatm(data []byte) ([]uint64, error) { + var statmSlice []uint64 + statmItems := strings.Fields(string(data)) + for i := 0; i < len(statmItems); i++ { + statmItem, err := strconv.ParseUint(statmItems[i], 10, 64) + if err != nil { + return nil, err + } + statmSlice = append(statmSlice, statmItem) + } + return statmSlice, nil +} + +// SizeBytes returns the process of total program size in bytes. +func (s ProcStatm) SizeBytes() uint64 { + return s.Size * uint64(os.Getpagesize()) +} + +// ResidentBytes returns the process of resident set size in bytes. +func (s ProcStatm) ResidentBytes() uint64 { + return s.Resident * uint64(os.Getpagesize()) +} + +// SHRBytes returns the process of share memory size in bytes. +func (s ProcStatm) SHRBytes() uint64 { + return s.Shared * uint64(os.Getpagesize()) +} + +// TextBytes returns the process of text (code) size in bytes. +func (s ProcStatm) TextBytes() uint64 { + return s.Text * uint64(os.Getpagesize()) +} + +// DataBytes returns the process of data + stack size in bytes. +func (s ProcStatm) DataBytes() uint64 { + return s.Data * uint64(os.Getpagesize()) +} diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go index a055197c63..dd8aa56885 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -146,7 +146,11 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt } } case "NSpid": - s.NSpids = calcNSPidsList(vString) + nspids, err := calcNSPidsList(vString) + if err != nil { + return err + } + s.NSpids = nspids case "VmPeak": s.VmPeak = vUintBytes case "VmSize": @@ -222,17 +226,17 @@ func calcCpusAllowedList(cpuString string) []uint64 { return g } -func calcNSPidsList(nspidsString string) []uint64 { - s := strings.Split(nspidsString, " ") +func calcNSPidsList(nspidsString string) ([]uint64, error) { + s := strings.Split(nspidsString, "\t") var nspids []uint64 for _, nspid := range s { - nspid, _ := strconv.ParseUint(nspid, 10, 64) - if nspid == 0 { - continue + nspid, err := strconv.ParseUint(nspid, 10, 64) + if err != nil { + return nil, err } nspids = append(nspids, nspid) } - return nspids + return nspids, nil } diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go index 5eefbe2ef8..3810d1ac99 100644 --- a/vendor/github.com/prometheus/procfs/proc_sys.go +++ b/vendor/github.com/prometheus/procfs/proc_sys.go @@ -21,7 +21,7 @@ import ( ) func sysctlToPath(sysctl string) string { - return strings.Replace(sysctl, ".", "/", -1) + return strings.ReplaceAll(sysctl, ".", "/") } func (fs FS) SysctlStrings(sysctl string) ([]string, error) { diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go index 28708e0745..403e6ae708 100644 --- a/vendor/github.com/prometheus/procfs/softirqs.go +++ b/vendor/github.com/prometheus/procfs/softirqs.go @@ -68,8 +68,8 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { if len(parts) < 2 { continue } - switch { - case parts[0] == "HI:": + switch parts[0] { + case "HI:": perCPU := parts[1:] softirqs.Hi = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -77,7 +77,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "TIMER:": + case "TIMER:": perCPU := parts[1:] softirqs.Timer = make([]uint64, len(perCPU)) for i, count 
:= range perCPU { @@ -85,7 +85,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "NET_TX:": + case "NET_TX:": perCPU := parts[1:] softirqs.NetTx = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -93,7 +93,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "NET_RX:": + case "NET_RX:": perCPU := parts[1:] softirqs.NetRx = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -101,7 +101,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "BLOCK:": + case "BLOCK:": perCPU := parts[1:] softirqs.Block = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -109,7 +109,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "IRQ_POLL:": + case "IRQ_POLL:": perCPU := parts[1:] softirqs.IRQPoll = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -117,7 +117,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "TASKLET:": + case "TASKLET:": perCPU := parts[1:] softirqs.Tasklet = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -125,7 +125,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "SCHED:": + case "SCHED:": perCPU := parts[1:] softirqs.Sched = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -133,7 +133,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "HRTIMER:": + case "HRTIMER:": perCPU := parts[1:] softirqs.HRTimer = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -141,7 +141,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "RCU:": + case "RCU:": perCPU := parts[1:] softirqs.RCU = make([]uint64, len(perCPU)) for i, count := range perCPU { diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index 93a798ab63..794b2e32bf 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -85,7 +85,7 @@ func (lim *Limiter) Burst() int { // TokensAt returns the number of tokens available at time t. 
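// A short, hedged example of the method documented above (limiter values are
// arbitrary): TokensAt reports the bucket level at an instant without
// mutating the limiter, which the reworked advance() below continues to
// guarantee.
package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	lim := rate.NewLimiter(rate.Limit(10), 5) // 10 tokens/s, burst of 5
	_ = lim.Allow()                           // consumes one token
	now := time.Now()
	fmt.Println(lim.TokensAt(now))                  // roughly 4
	fmt.Println(lim.TokensAt(now.Add(time.Second))) // refilled, capped at the burst of 5
}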
func (lim *Limiter) TokensAt(t time.Time) float64 { lim.mu.Lock() - _, tokens := lim.advance(t) // does not mutate lim + tokens := lim.advance(t) // does not mutate lim lim.mu.Unlock() return tokens } @@ -186,7 +186,7 @@ func (r *Reservation) CancelAt(t time.Time) { return } // advance time to now - t, tokens := r.lim.advance(t) + tokens := r.lim.advance(t) // calculate new number of tokens tokens += restoreTokens if burst := float64(r.lim.burst); tokens > burst { @@ -307,7 +307,7 @@ func (lim *Limiter) SetLimitAt(t time.Time, newLimit Limit) { lim.mu.Lock() defer lim.mu.Unlock() - t, tokens := lim.advance(t) + tokens := lim.advance(t) lim.last = t lim.tokens = tokens @@ -324,7 +324,7 @@ func (lim *Limiter) SetBurstAt(t time.Time, newBurst int) { lim.mu.Lock() defer lim.mu.Unlock() - t, tokens := lim.advance(t) + tokens := lim.advance(t) lim.last = t lim.tokens = tokens @@ -347,7 +347,7 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) } } - t, tokens := lim.advance(t) + tokens := lim.advance(t) // Calculate the remaining number of tokens resulting from the request. tokens -= float64(n) @@ -380,10 +380,11 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) return r } -// advance calculates and returns an updated state for lim resulting from the passage of time. +// advance calculates and returns an updated number of tokens for lim +// resulting from the passage of time. // lim is not changed. // advance requires that lim.mu is held. -func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) { +func (lim *Limiter) advance(t time.Time) (newTokens float64) { last := lim.last if t.Before(last) { last = t @@ -396,7 +397,7 @@ func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) { if burst := float64(lim.burst); tokens > burst { tokens = burst } - return t, tokens + return tokens } // durationFromTokens is a unit conversion function from the number of tokens to the duration @@ -405,8 +406,15 @@ func (limit Limit) durationFromTokens(tokens float64) time.Duration { if limit <= 0 { return InfDuration } - seconds := tokens / float64(limit) - return time.Duration(float64(time.Second) * seconds) + + duration := (tokens / float64(limit)) * float64(time.Second) + + // Cap the duration to the maximum representable int64 value, to avoid overflow. 
+ if duration > float64(math.MaxInt64) { + return InfDuration + } + + return time.Duration(duration) } // tokensFromDuration is a unit conversion function from a time duration to the number of tokens diff --git a/vendor/golang.org/x/time/rate/sometimes.go b/vendor/golang.org/x/time/rate/sometimes.go index 6ba99ddb67..9b83932692 100644 --- a/vendor/golang.org/x/time/rate/sometimes.go +++ b/vendor/golang.org/x/time/rate/sometimes.go @@ -61,7 +61,9 @@ func (s *Sometimes) Do(f func()) { (s.Every > 0 && s.count%s.Every == 0) || (s.Interval > 0 && time.Since(s.last) >= s.Interval) { f() - s.last = time.Now() + if s.Interval > 0 { + s.last = time.Now() + } } s.count++ } diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb index 5a57ef6f3c..323829da14 100644 Binary files a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb and b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb differ diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go index 10132c9b38..b08b71830c 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -69,6 +69,9 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures { parent.IsDelimitedEncoded = v == genid.FeatureSet_DELIMITED_enum_value case genid.FeatureSet_JsonFormat_field_number: parent.IsJSONCompliant = v == genid.FeatureSet_ALLOW_enum_value + case genid.FeatureSet_EnforceNamingStyle_field_number: + // EnforceNamingStyle is enforced in protoc, languages other than C++ + // are not supposed to do anything with this feature. default: panic(fmt.Sprintf("unkown field number %d while unmarshalling FeatureSet", num)) } diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index f30ab6b586..39524782ad 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -1014,6 +1014,7 @@ const ( FeatureSet_Utf8Validation_field_name protoreflect.Name = "utf8_validation" FeatureSet_MessageEncoding_field_name protoreflect.Name = "message_encoding" FeatureSet_JsonFormat_field_name protoreflect.Name = "json_format" + FeatureSet_EnforceNamingStyle_field_name protoreflect.Name = "enforce_naming_style" FeatureSet_FieldPresence_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.field_presence" FeatureSet_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enum_type" @@ -1021,6 +1022,7 @@ const ( FeatureSet_Utf8Validation_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation" FeatureSet_MessageEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding" FeatureSet_JsonFormat_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.json_format" + FeatureSet_EnforceNamingStyle_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enforce_naming_style" ) // Field numbers for google.protobuf.FeatureSet. 
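// A hedged example of the rate.Sometimes helper patched above (the values
// are arbitrary): with Every set and no Interval, Do runs f on calls 0, 3,
// 6, ..., and after this change it no longer records a timestamp it would
// never consult.
package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	s := rate.Sometimes{Every: 3}
	for i := 0; i < 7; i++ {
		s.Do(func() { fmt.Println("ran on call", i) }) // prints for i = 0, 3, 6
	}
}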
@@ -1031,6 +1033,7 @@ const ( FeatureSet_Utf8Validation_field_number protoreflect.FieldNumber = 4 FeatureSet_MessageEncoding_field_number protoreflect.FieldNumber = 5 FeatureSet_JsonFormat_field_number protoreflect.FieldNumber = 6 + FeatureSet_EnforceNamingStyle_field_number protoreflect.FieldNumber = 7 ) // Full and short names for google.protobuf.FeatureSet.FieldPresence. @@ -1112,6 +1115,19 @@ const ( FeatureSet_LEGACY_BEST_EFFORT_enum_value = 2 ) +// Full and short names for google.protobuf.FeatureSet.EnforceNamingStyle. +const ( + FeatureSet_EnforceNamingStyle_enum_fullname = "google.protobuf.FeatureSet.EnforceNamingStyle" + FeatureSet_EnforceNamingStyle_enum_name = "EnforceNamingStyle" +) + +// Enum values for google.protobuf.FeatureSet.EnforceNamingStyle. +const ( + FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN_enum_value = 0 + FeatureSet_STYLE2024_enum_value = 1 + FeatureSet_STYLE_LEGACY_enum_value = 2 +) + // Names for google.protobuf.FeatureSetDefaults. const ( FeatureSetDefaults_message_name protoreflect.Name = "FeatureSetDefaults" diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go similarity index 99% rename from vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go rename to vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go index 1ffddf6877..42dd6f70c6 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.21 - package strs import ( diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go deleted file mode 100644 index 832a7988f1..0000000000 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.21 - -package strs - -import ( - "unsafe" - - "google.golang.org/protobuf/reflect/protoreflect" -) - -type ( - stringHeader struct { - Data unsafe.Pointer - Len int - } - sliceHeader struct { - Data unsafe.Pointer - Len int - Cap int - } -) - -// UnsafeString returns an unsafe string reference of b. -// The caller must treat the input slice as immutable. -// -// WARNING: Use carefully. The returned result must not leak to the end user -// unless the input slice is provably immutable. -func UnsafeString(b []byte) (s string) { - src := (*sliceHeader)(unsafe.Pointer(&b)) - dst := (*stringHeader)(unsafe.Pointer(&s)) - dst.Data = src.Data - dst.Len = src.Len - return s -} - -// UnsafeBytes returns an unsafe bytes slice reference of s. -// The caller must treat returned slice as immutable. -// -// WARNING: Use carefully. The returned result must not leak to the end user. -func UnsafeBytes(s string) (b []byte) { - src := (*stringHeader)(unsafe.Pointer(&s)) - dst := (*sliceHeader)(unsafe.Pointer(&b)) - dst.Data = src.Data - dst.Len = src.Len - dst.Cap = src.Len - return b -} - -// Builder builds a set of strings with shared lifetime. -// This differs from strings.Builder, which is for building a single string. 
-type Builder struct { - buf []byte -} - -// AppendFullName is equivalent to protoreflect.FullName.Append, -// but optimized for large batches where each name has a shared lifetime. -func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName { - n := len(prefix) + len(".") + len(name) - if len(prefix) == 0 { - n -= len(".") - } - sb.grow(n) - sb.buf = append(sb.buf, prefix...) - sb.buf = append(sb.buf, '.') - sb.buf = append(sb.buf, name...) - return protoreflect.FullName(sb.last(n)) -} - -// MakeString is equivalent to string(b), but optimized for large batches -// with a shared lifetime. -func (sb *Builder) MakeString(b []byte) string { - sb.grow(len(b)) - sb.buf = append(sb.buf, b...) - return sb.last(len(b)) -} - -func (sb *Builder) grow(n int) { - if cap(sb.buf)-len(sb.buf) >= n { - return - } - - // Unlike strings.Builder, we do not need to copy over the contents - // of the old buffer since our builder provides no API for - // retrieving previously created strings. - sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n)) -} - -func (sb *Builder) last(n int) string { - return UnsafeString(sb.buf[len(sb.buf)-n:]) -} diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index 01efc33030..aac1cb18a7 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( const ( Major = 1 Minor = 36 - Patch = 5 + Patch = 6 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/merge.go b/vendor/google.golang.org/protobuf/proto/merge.go index 3c6fe57807..ef55b97dde 100644 --- a/vendor/google.golang.org/protobuf/proto/merge.go +++ b/vendor/google.golang.org/protobuf/proto/merge.go @@ -59,6 +59,12 @@ func Clone(m Message) Message { return dst.Interface() } +// CloneOf returns a deep copy of m. If the top-level message is invalid, +// it returns an invalid message as well. +func CloneOf[M Message](m M) M { + return Clone(m).(M) +} + // mergeOptions provides a namespace for merge functions, and can be // exported in the future if we add user-visible merge options. type mergeOptions struct{} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index ea154eec44..a4a0a2971d 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -398,6 +398,8 @@ func (p *SourcePath) appendFeatureSet(b []byte) []byte { b = p.appendSingularField(b, "message_encoding", nil) case 6: b = p.appendSingularField(b, "json_format", nil) + case 7: + b = p.appendSingularField(b, "enforce_naming_style", nil) } return b } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go similarity index 99% rename from vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go rename to vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go index 479527b58d..fe17f37220 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
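// A hedged sketch of the new generic helper added to proto/merge.go above;
// durationpb is used only as a convenient concrete message type. CloneOf
// returns the deep copy already typed as M, so the assertion needed with
// proto.Clone disappears.
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	orig := durationpb.New(2 * time.Second)

	viaClone := proto.Clone(orig).(*durationpb.Duration) // pre-existing pattern
	viaCloneOf := proto.CloneOf(orig)                     // new: already *durationpb.Duration

	fmt.Println(proto.Equal(viaClone, viaCloneOf)) // true
}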
-//go:build go1.21 - package protoreflect import ( diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go deleted file mode 100644 index 0015fcb35d..0000000000 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.21 - -package protoreflect - -import ( - "unsafe" - - "google.golang.org/protobuf/internal/pragma" -) - -type ( - stringHeader struct { - Data unsafe.Pointer - Len int - } - sliceHeader struct { - Data unsafe.Pointer - Len int - Cap int - } - ifaceHeader struct { - Type unsafe.Pointer - Data unsafe.Pointer - } -) - -var ( - nilType = typeOf(nil) - boolType = typeOf(*new(bool)) - int32Type = typeOf(*new(int32)) - int64Type = typeOf(*new(int64)) - uint32Type = typeOf(*new(uint32)) - uint64Type = typeOf(*new(uint64)) - float32Type = typeOf(*new(float32)) - float64Type = typeOf(*new(float64)) - stringType = typeOf(*new(string)) - bytesType = typeOf(*new([]byte)) - enumType = typeOf(*new(EnumNumber)) -) - -// typeOf returns a pointer to the Go type information. -// The pointer is comparable and equal if and only if the types are identical. -func typeOf(t any) unsafe.Pointer { - return (*ifaceHeader)(unsafe.Pointer(&t)).Type -} - -// value is a union where only one type can be represented at a time. -// The struct is 24B large on 64-bit systems and requires the minimum storage -// necessary to represent each possible type. -// -// The Go GC needs to be able to scan variables containing pointers. -// As such, pointers and non-pointers cannot be intermixed. -type value struct { - pragma.DoNotCompare // 0B - - // typ stores the type of the value as a pointer to the Go type. - typ unsafe.Pointer // 8B - - // ptr stores the data pointer for a String, Bytes, or interface value. - ptr unsafe.Pointer // 8B - - // num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or - // Enum value as a raw uint64. - // - // It is also used to store the length of a String or Bytes value; - // the capacity is ignored. 
- num uint64 // 8B -} - -func valueOfString(v string) Value { - p := (*stringHeader)(unsafe.Pointer(&v)) - return Value{typ: stringType, ptr: p.Data, num: uint64(len(v))} -} -func valueOfBytes(v []byte) Value { - p := (*sliceHeader)(unsafe.Pointer(&v)) - return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))} -} -func valueOfIface(v any) Value { - p := (*ifaceHeader)(unsafe.Pointer(&v)) - return Value{typ: p.Type, ptr: p.Data} -} - -func (v Value) getString() (x string) { - *(*stringHeader)(unsafe.Pointer(&x)) = stringHeader{Data: v.ptr, Len: int(v.num)} - return x -} -func (v Value) getBytes() (x []byte) { - *(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)} - return x -} -func (v Value) getIface() (x any) { - *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} - return x -} diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index a516337674..7fe280f194 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -1139,6 +1139,65 @@ func (FeatureSet_JsonFormat) EnumDescriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 5} } +type FeatureSet_EnforceNamingStyle int32 + +const ( + FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN FeatureSet_EnforceNamingStyle = 0 + FeatureSet_STYLE2024 FeatureSet_EnforceNamingStyle = 1 + FeatureSet_STYLE_LEGACY FeatureSet_EnforceNamingStyle = 2 +) + +// Enum value maps for FeatureSet_EnforceNamingStyle. +var ( + FeatureSet_EnforceNamingStyle_name = map[int32]string{ + 0: "ENFORCE_NAMING_STYLE_UNKNOWN", + 1: "STYLE2024", + 2: "STYLE_LEGACY", + } + FeatureSet_EnforceNamingStyle_value = map[string]int32{ + "ENFORCE_NAMING_STYLE_UNKNOWN": 0, + "STYLE2024": 1, + "STYLE_LEGACY": 2, + } +) + +func (x FeatureSet_EnforceNamingStyle) Enum() *FeatureSet_EnforceNamingStyle { + p := new(FeatureSet_EnforceNamingStyle) + *p = x + return p +} + +func (x FeatureSet_EnforceNamingStyle) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_EnforceNamingStyle) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor() +} + +func (FeatureSet_EnforceNamingStyle) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[16] +} + +func (x FeatureSet_EnforceNamingStyle) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_EnforceNamingStyle) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_EnforceNamingStyle(num) + return nil +} + +// Deprecated: Use FeatureSet_EnforceNamingStyle.Descriptor instead. +func (FeatureSet_EnforceNamingStyle) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 6} +} + // Represents the identified object's effect on the element in the original // .proto file. 
type GeneratedCodeInfo_Annotation_Semantic int32 @@ -1177,11 +1236,11 @@ func (x GeneratedCodeInfo_Annotation_Semantic) String() string { } func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[17].Descriptor() } func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[16] + return &file_google_protobuf_descriptor_proto_enumTypes[17] } func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber { @@ -1277,8 +1336,14 @@ type FileDescriptorProto struct { // The supported values are "proto2", "proto3", and "editions". // // If `edition` is present, this value must be "editions". + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` // The edition of the proto file. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -2212,6 +2277,9 @@ type FileOptions struct { // determining the ruby package. RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. // See the documentation for the "Options" section above. @@ -2482,6 +2550,9 @@ type MessageOptions struct { // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,11,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` @@ -2648,6 +2719,9 @@ type FieldOptions struct { Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` EditionDefaults []*FieldOptions_EditionDefault `protobuf:"bytes,20,rep,name=edition_defaults,json=editionDefaults" json:"edition_defaults,omitempty"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"` FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,22,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` // The parser stores options it doesn't recognize here. See above. @@ -2799,6 +2873,9 @@ func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { type OneofOptions struct { state protoimpl.MessageState `protogen:"open.v1"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` @@ -2871,6 +2948,9 @@ type EnumOptions struct { // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,6,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` @@ -2958,6 +3038,9 @@ type EnumValueOptions struct { // this is a formalization for deprecating enum values. Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"` // Indicate that fields annotated with this enum value should not be printed // out when using debug formats, e.g. 
when the field contains sensitive @@ -3046,6 +3129,9 @@ func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { type ServiceOptions struct { state protoimpl.MessageState `protogen:"open.v1"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"` // Is this service deprecated? // Depending on the target platform, this can emit Deprecated annotations @@ -3124,6 +3210,9 @@ type MethodOptions struct { Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` @@ -3310,6 +3399,7 @@ type FeatureSet struct { Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"` MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"` JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"` + EnforceNamingStyle *FeatureSet_EnforceNamingStyle `protobuf:"varint,7,opt,name=enforce_naming_style,json=enforceNamingStyle,enum=google.protobuf.FeatureSet_EnforceNamingStyle" json:"enforce_naming_style,omitempty"` extensionFields protoimpl.ExtensionFields unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -3387,6 +3477,13 @@ func (x *FeatureSet) GetJsonFormat() FeatureSet_JsonFormat { return FeatureSet_JSON_FORMAT_UNKNOWN } +func (x *FeatureSet) GetEnforceNamingStyle() FeatureSet_EnforceNamingStyle { + if x != nil && x.EnforceNamingStyle != nil { + return *x.EnforceNamingStyle + } + return FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN +} + // A compiled specification for the defaults of a set of features. These // messages are generated from FeatureSet extensions and can be used to seed // feature resolution. 
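// A hedged illustration of the generated API introduced in this file (the
// chosen value is arbitrary): the new field follows the usual optional-enum
// pattern, a *FeatureSet_EnforceNamingStyle pointer plus a getter that falls
// back to ENFORCE_NAMING_STYLE_UNKNOWN when the field is unset.
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	fs := &descriptorpb.FeatureSet{
		EnforceNamingStyle: descriptorpb.FeatureSet_STYLE2024.Enum(),
	}
	fmt.Println(fs.GetEnforceNamingStyle())                           // STYLE2024
	fmt.Println((&descriptorpb.FeatureSet{}).GetEnforceNamingStyle()) // ENFORCE_NAMING_STYLE_UNKNOWN
}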
The resolution with this object becomes a simple search @@ -4361,777 +4458,367 @@ func (x *GeneratedCodeInfo_Annotation) GetSemantic() GeneratedCodeInfo_Annotatio var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor -var file_google_protobuf_descriptor_proto_rawDesc = string([]byte{ - 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x22, 0x5b, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69, - 0x6c, 0x65, 0x2a, 0x0c, 0x08, 0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01, - 0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, - 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, - 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, - 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x65, - 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, - 0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20, 0x03, 0x28, - 0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, - 0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65, 0x70, 0x65, - 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e, 0x77, 0x65, - 0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43, 0x0a, 0x0c, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x07, - 0x73, 0x65, 0x72, 
0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, - 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, - 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, - 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, - 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, - 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 
0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, - 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, - 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, - 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, - 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, - 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, - 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, - 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 
0x07, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0b, 0x64, - 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, - 0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, - 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a, - 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88, 0x01, 0x02, - 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x94, - 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, - 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, - 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4a, - 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, - 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, - 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, - 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 
0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, - 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, - 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, - 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, - 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, - 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, - 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, - 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, - 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, - 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, - 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, - 0x59, 0x50, 0x45, 
0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, - 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, - 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, - 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, - 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, - 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, - 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, - 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, - 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, - 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, - 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, - 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, - 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, - 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, - 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, - 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 
0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, - 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, - 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, - 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, - 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 
0x65, 0x61, 0x6d, 0x69, 0x6e, - 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, - 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, - 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, - 0x67, 0x22, 0xad, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, - 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, - 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, - 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, - 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, - 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, - 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, - 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, - 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, - 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, - 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, - 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, - 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, - 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, - 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, - 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, - 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, - 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 
0x65, - 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, - 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, - 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, - 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, - 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, - 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, - 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, - 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, - 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, - 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, - 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, - 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, - 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, - 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, - 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, - 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 
0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, - 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, - 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, - 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, - 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, - 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70, 0x68, 0x70, - 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, - 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, - 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, - 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, - 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, - 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, - 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, - 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, - 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 
0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, - 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, - 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, - 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, - 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, - 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, - 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, - 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, - 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, - 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, - 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, - 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, - 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, - 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, - 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, - 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, - 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 
0x75, 0x6c, 0x74, 0x73, 0x18, - 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x55, 0x0a, - 0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, - 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, - 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, - 0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5a, - 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, 0x0e, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x47, 0x0a, - 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x64, 0x75, - 0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x72, - 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, - 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 
0x77, - 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, - 0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, - 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, - 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, - 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, - 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, - 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, - 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, - 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, - 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, - 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, - 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, - 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, - 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, - 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, - 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, - 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, - 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, - 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, - 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, - 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, - 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, - 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, - 0x10, 0x05, 0x4a, 
0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, - 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, - 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, - 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, - 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, - 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, - 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, - 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, - 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, - 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, - 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, 0x0a, 0x10, - 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 
0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, - 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, 0x66, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, - 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, - 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, - 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x99, - 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70, - 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 
0x6c, 0x18, 0x22, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, - 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, - 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, - 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, - 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, - 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, - 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, - 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, - 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, - 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, - 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, - 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, - 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, - 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, - 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 
0x01, - 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, - 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, - 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, - 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, - 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, - 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, 0x01, 0x98, - 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, - 0x49, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, - 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, - 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, 0x65, 0x6e, - 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54, - 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, - 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x09, 0x12, - 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08, - 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70, - 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f, - 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x2d, 0x88, - 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, - 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43, - 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, 0x72, 0x65, - 0x70, 0x65, 0x61, 
0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, - 0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, - 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0x84, 0x07, 0xa2, - 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, - 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, - 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, 0x01, 0x98, - 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, - 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08, - 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, - 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, - 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, - 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, - 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, - 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x41, 0x4c, - 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, 0x6a, 0x73, - 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45, - 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, - 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, - 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55, - 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, - 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22, - 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 
0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45, - 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, - 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, - 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, - 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46, - 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, - 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, - 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, 0x08, 0x01, - 0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, - 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, - 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, - 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, - 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, - 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, - 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, - 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, - 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, - 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, - 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, - 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, - 0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, - 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, - 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, - 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, - 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 
0x0a, 0x18, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, 0x76, 0x65, - 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, - 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66, 0x69, 0x78, - 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x0d, - 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, 0x04, 0x08, - 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x22, 0xb5, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, - 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, - 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, - 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, - 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, - 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, - 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, - 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, - 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, - 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, - 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2a, 0x0c, 0x08, - 0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01, 0x22, 0xd0, 0x02, 0x0a, 0x11, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 
0x66, - 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, - 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, - 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, - 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, - 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, - 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, - 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, - 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, - 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0xa7, - 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, - 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, 0x47, 0x41, 0x43, - 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, 0xe7, 0x07, 0x12, 0x11, - 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, 0x33, 0x10, 0xe8, - 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, - 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a, - 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, - 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, - 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, - 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x39, 0x39, 0x39, 
0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, - 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, - 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, - 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, - 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, - 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, - 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, -}) +const file_google_protobuf_descriptor_proto_rawDesc = "" + + "\n" + + " google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"[\n" + + "\x11FileDescriptorSet\x128\n" + + "\x04file\x18\x01 \x03(\v2$.google.protobuf.FileDescriptorProtoR\x04file*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\x98\x05\n" + + "\x13FileDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n" + + "\apackage\x18\x02 \x01(\tR\apackage\x12\x1e\n" + + "\n" + + "dependency\x18\x03 \x03(\tR\n" + + "dependency\x12+\n" + + "\x11public_dependency\x18\n" + + " \x03(\x05R\x10publicDependency\x12'\n" + + "\x0fweak_dependency\x18\v \x03(\x05R\x0eweakDependency\x12C\n" + + "\fmessage_type\x18\x04 \x03(\v2 .google.protobuf.DescriptorProtoR\vmessageType\x12A\n" + + "\tenum_type\x18\x05 \x03(\v2$.google.protobuf.EnumDescriptorProtoR\benumType\x12A\n" + + "\aservice\x18\x06 \x03(\v2'.google.protobuf.ServiceDescriptorProtoR\aservice\x12C\n" + + "\textension\x18\a \x03(\v2%.google.protobuf.FieldDescriptorProtoR\textension\x126\n" + + "\aoptions\x18\b \x01(\v2\x1c.google.protobuf.FileOptionsR\aoptions\x12I\n" + + "\x10source_code_info\x18\t \x01(\v2\x1f.google.protobuf.SourceCodeInfoR\x0esourceCodeInfo\x12\x16\n" + + "\x06syntax\x18\f \x01(\tR\x06syntax\x122\n" + + "\aedition\x18\x0e \x01(\x0e2\x18.google.protobuf.EditionR\aedition\"\xb9\x06\n" + + "\x0fDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12;\n" + + "\x05field\x18\x02 \x03(\v2%.google.protobuf.FieldDescriptorProtoR\x05field\x12C\n" + + "\textension\x18\x06 \x03(\v2%.google.protobuf.FieldDescriptorProtoR\textension\x12A\n" + + "\vnested_type\x18\x03 \x03(\v2 .google.protobuf.DescriptorProtoR\n" + + "nestedType\x12A\n" + + "\tenum_type\x18\x04 \x03(\v2$.google.protobuf.EnumDescriptorProtoR\benumType\x12X\n" + + "\x0fextension_range\x18\x05 \x03(\v2/.google.protobuf.DescriptorProto.ExtensionRangeR\x0eextensionRange\x12D\n" + + "\n" + + "oneof_decl\x18\b \x03(\v2%.google.protobuf.OneofDescriptorProtoR\toneofDecl\x129\n" + + "\aoptions\x18\a \x01(\v2\x1f.google.protobuf.MessageOptionsR\aoptions\x12U\n" + + "\x0ereserved_range\x18\t \x03(\v2..google.protobuf.DescriptorProto.ReservedRangeR\rreservedRange\x12#\n" + + "\rreserved_name\x18\n" + + " \x03(\tR\freservedName\x1az\n" + + "\x0eExtensionRange\x12\x14\n" + + "\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" + + "\x03end\x18\x02 \x01(\x05R\x03end\x12@\n" + + "\aoptions\x18\x03 \x01(\v2&.google.protobuf.ExtensionRangeOptionsR\aoptions\x1a7\n" + + "\rReservedRange\x12\x14\n" + + 
"\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" + + "\x03end\x18\x02 \x01(\x05R\x03end\"\xcc\x04\n" + + "\x15ExtensionRangeOptions\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\x12Y\n" + + "\vdeclaration\x18\x02 \x03(\v22.google.protobuf.ExtensionRangeOptions.DeclarationB\x03\x88\x01\x02R\vdeclaration\x127\n" + + "\bfeatures\x182 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12m\n" + + "\fverification\x18\x03 \x01(\x0e28.google.protobuf.ExtensionRangeOptions.VerificationState:\n" + + "UNVERIFIEDB\x03\x88\x01\x02R\fverification\x1a\x94\x01\n" + + "\vDeclaration\x12\x16\n" + + "\x06number\x18\x01 \x01(\x05R\x06number\x12\x1b\n" + + "\tfull_name\x18\x02 \x01(\tR\bfullName\x12\x12\n" + + "\x04type\x18\x03 \x01(\tR\x04type\x12\x1a\n" + + "\breserved\x18\x05 \x01(\bR\breserved\x12\x1a\n" + + "\brepeated\x18\x06 \x01(\bR\brepeatedJ\x04\b\x04\x10\x05\"4\n" + + "\x11VerificationState\x12\x0f\n" + + "\vDECLARATION\x10\x00\x12\x0e\n" + + "\n" + + "UNVERIFIED\x10\x01*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xc1\x06\n" + + "\x14FieldDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n" + + "\x06number\x18\x03 \x01(\x05R\x06number\x12A\n" + + "\x05label\x18\x04 \x01(\x0e2+.google.protobuf.FieldDescriptorProto.LabelR\x05label\x12>\n" + + "\x04type\x18\x05 \x01(\x0e2*.google.protobuf.FieldDescriptorProto.TypeR\x04type\x12\x1b\n" + + "\ttype_name\x18\x06 \x01(\tR\btypeName\x12\x1a\n" + + "\bextendee\x18\x02 \x01(\tR\bextendee\x12#\n" + + "\rdefault_value\x18\a \x01(\tR\fdefaultValue\x12\x1f\n" + + "\voneof_index\x18\t \x01(\x05R\n" + + "oneofIndex\x12\x1b\n" + + "\tjson_name\x18\n" + + " \x01(\tR\bjsonName\x127\n" + + "\aoptions\x18\b \x01(\v2\x1d.google.protobuf.FieldOptionsR\aoptions\x12'\n" + + "\x0fproto3_optional\x18\x11 \x01(\bR\x0eproto3Optional\"\xb6\x02\n" + + "\x04Type\x12\x0f\n" + + "\vTYPE_DOUBLE\x10\x01\x12\x0e\n" + + "\n" + + "TYPE_FLOAT\x10\x02\x12\x0e\n" + + "\n" + + "TYPE_INT64\x10\x03\x12\x0f\n" + + "\vTYPE_UINT64\x10\x04\x12\x0e\n" + + "\n" + + "TYPE_INT32\x10\x05\x12\x10\n" + + "\fTYPE_FIXED64\x10\x06\x12\x10\n" + + "\fTYPE_FIXED32\x10\a\x12\r\n" + + "\tTYPE_BOOL\x10\b\x12\x0f\n" + + "\vTYPE_STRING\x10\t\x12\x0e\n" + + "\n" + + "TYPE_GROUP\x10\n" + + "\x12\x10\n" + + "\fTYPE_MESSAGE\x10\v\x12\x0e\n" + + "\n" + + "TYPE_BYTES\x10\f\x12\x0f\n" + + "\vTYPE_UINT32\x10\r\x12\r\n" + + "\tTYPE_ENUM\x10\x0e\x12\x11\n" + + "\rTYPE_SFIXED32\x10\x0f\x12\x11\n" + + "\rTYPE_SFIXED64\x10\x10\x12\x0f\n" + + "\vTYPE_SINT32\x10\x11\x12\x0f\n" + + "\vTYPE_SINT64\x10\x12\"C\n" + + "\x05Label\x12\x12\n" + + "\x0eLABEL_OPTIONAL\x10\x01\x12\x12\n" + + "\x0eLABEL_REPEATED\x10\x03\x12\x12\n" + + "\x0eLABEL_REQUIRED\x10\x02\"c\n" + + "\x14OneofDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x127\n" + + "\aoptions\x18\x02 \x01(\v2\x1d.google.protobuf.OneofOptionsR\aoptions\"\xe3\x02\n" + + "\x13EnumDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12?\n" + + "\x05value\x18\x02 \x03(\v2).google.protobuf.EnumValueDescriptorProtoR\x05value\x126\n" + + "\aoptions\x18\x03 \x01(\v2\x1c.google.protobuf.EnumOptionsR\aoptions\x12]\n" + + "\x0ereserved_range\x18\x04 \x03(\v26.google.protobuf.EnumDescriptorProto.EnumReservedRangeR\rreservedRange\x12#\n" + + "\rreserved_name\x18\x05 \x03(\tR\freservedName\x1a;\n" + + "\x11EnumReservedRange\x12\x14\n" + + "\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" + + "\x03end\x18\x02 \x01(\x05R\x03end\"\x83\x01\n" + + 
"\x18EnumValueDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n" + + "\x06number\x18\x02 \x01(\x05R\x06number\x12;\n" + + "\aoptions\x18\x03 \x01(\v2!.google.protobuf.EnumValueOptionsR\aoptions\"\xa7\x01\n" + + "\x16ServiceDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12>\n" + + "\x06method\x18\x02 \x03(\v2&.google.protobuf.MethodDescriptorProtoR\x06method\x129\n" + + "\aoptions\x18\x03 \x01(\v2\x1f.google.protobuf.ServiceOptionsR\aoptions\"\x89\x02\n" + + "\x15MethodDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x1d\n" + + "\n" + + "input_type\x18\x02 \x01(\tR\tinputType\x12\x1f\n" + + "\voutput_type\x18\x03 \x01(\tR\n" + + "outputType\x128\n" + + "\aoptions\x18\x04 \x01(\v2\x1e.google.protobuf.MethodOptionsR\aoptions\x120\n" + + "\x10client_streaming\x18\x05 \x01(\b:\x05falseR\x0fclientStreaming\x120\n" + + "\x10server_streaming\x18\x06 \x01(\b:\x05falseR\x0fserverStreaming\"\xad\t\n" + + "\vFileOptions\x12!\n" + + "\fjava_package\x18\x01 \x01(\tR\vjavaPackage\x120\n" + + "\x14java_outer_classname\x18\b \x01(\tR\x12javaOuterClassname\x125\n" + + "\x13java_multiple_files\x18\n" + + " \x01(\b:\x05falseR\x11javaMultipleFiles\x12D\n" + + "\x1djava_generate_equals_and_hash\x18\x14 \x01(\bB\x02\x18\x01R\x19javaGenerateEqualsAndHash\x12:\n" + + "\x16java_string_check_utf8\x18\x1b \x01(\b:\x05falseR\x13javaStringCheckUtf8\x12S\n" + + "\foptimize_for\x18\t \x01(\x0e2).google.protobuf.FileOptions.OptimizeMode:\x05SPEEDR\voptimizeFor\x12\x1d\n" + + "\n" + + "go_package\x18\v \x01(\tR\tgoPackage\x125\n" + + "\x13cc_generic_services\x18\x10 \x01(\b:\x05falseR\x11ccGenericServices\x129\n" + + "\x15java_generic_services\x18\x11 \x01(\b:\x05falseR\x13javaGenericServices\x125\n" + + "\x13py_generic_services\x18\x12 \x01(\b:\x05falseR\x11pyGenericServices\x12%\n" + + "\n" + + "deprecated\x18\x17 \x01(\b:\x05falseR\n" + + "deprecated\x12.\n" + + "\x10cc_enable_arenas\x18\x1f \x01(\b:\x04trueR\x0eccEnableArenas\x12*\n" + + "\x11objc_class_prefix\x18$ \x01(\tR\x0fobjcClassPrefix\x12)\n" + + "\x10csharp_namespace\x18% \x01(\tR\x0fcsharpNamespace\x12!\n" + + "\fswift_prefix\x18' \x01(\tR\vswiftPrefix\x12(\n" + + "\x10php_class_prefix\x18( \x01(\tR\x0ephpClassPrefix\x12#\n" + + "\rphp_namespace\x18) \x01(\tR\fphpNamespace\x124\n" + + "\x16php_metadata_namespace\x18, \x01(\tR\x14phpMetadataNamespace\x12!\n" + + "\fruby_package\x18- \x01(\tR\vrubyPackage\x127\n" + + "\bfeatures\x182 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\":\n" + + "\fOptimizeMode\x12\t\n" + + "\x05SPEED\x10\x01\x12\r\n" + + "\tCODE_SIZE\x10\x02\x12\x10\n" + + "\fLITE_RUNTIME\x10\x03*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b*\x10+J\x04\b&\x10'R\x14php_generic_services\"\xf4\x03\n" + + "\x0eMessageOptions\x12<\n" + + "\x17message_set_wire_format\x18\x01 \x01(\b:\x05falseR\x14messageSetWireFormat\x12L\n" + + "\x1fno_standard_descriptor_accessor\x18\x02 \x01(\b:\x05falseR\x1cnoStandardDescriptorAccessor\x12%\n" + + "\n" + + "deprecated\x18\x03 \x01(\b:\x05falseR\n" + + "deprecated\x12\x1b\n" + + "\tmap_entry\x18\a \x01(\bR\bmapEntry\x12V\n" + + "&deprecated_legacy_json_field_conflicts\x18\v \x01(\bB\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x127\n" + + "\bfeatures\x18\f \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a 
\x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x04\x10\x05J\x04\b\x05\x10\x06J\x04\b\x06\x10\aJ\x04\b\b\x10\tJ\x04\b\t\x10\n" + + "\"\x9d\r\n" + + "\fFieldOptions\x12A\n" + + "\x05ctype\x18\x01 \x01(\x0e2#.google.protobuf.FieldOptions.CType:\x06STRINGR\x05ctype\x12\x16\n" + + "\x06packed\x18\x02 \x01(\bR\x06packed\x12G\n" + + "\x06jstype\x18\x06 \x01(\x0e2$.google.protobuf.FieldOptions.JSType:\tJS_NORMALR\x06jstype\x12\x19\n" + + "\x04lazy\x18\x05 \x01(\b:\x05falseR\x04lazy\x12.\n" + + "\x0funverified_lazy\x18\x0f \x01(\b:\x05falseR\x0eunverifiedLazy\x12%\n" + + "\n" + + "deprecated\x18\x03 \x01(\b:\x05falseR\n" + + "deprecated\x12\x19\n" + + "\x04weak\x18\n" + + " \x01(\b:\x05falseR\x04weak\x12(\n" + + "\fdebug_redact\x18\x10 \x01(\b:\x05falseR\vdebugRedact\x12K\n" + + "\tretention\x18\x11 \x01(\x0e2-.google.protobuf.FieldOptions.OptionRetentionR\tretention\x12H\n" + + "\atargets\x18\x13 \x03(\x0e2..google.protobuf.FieldOptions.OptionTargetTypeR\atargets\x12W\n" + + "\x10edition_defaults\x18\x14 \x03(\v2,.google.protobuf.FieldOptions.EditionDefaultR\x0feditionDefaults\x127\n" + + "\bfeatures\x18\x15 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12U\n" + + "\x0ffeature_support\x18\x16 \x01(\v2,.google.protobuf.FieldOptions.FeatureSupportR\x0efeatureSupport\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\x1aZ\n" + + "\x0eEditionDefault\x122\n" + + "\aedition\x18\x03 \x01(\x0e2\x18.google.protobuf.EditionR\aedition\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value\x1a\x96\x02\n" + + "\x0eFeatureSupport\x12G\n" + + "\x12edition_introduced\x18\x01 \x01(\x0e2\x18.google.protobuf.EditionR\x11editionIntroduced\x12G\n" + + "\x12edition_deprecated\x18\x02 \x01(\x0e2\x18.google.protobuf.EditionR\x11editionDeprecated\x12/\n" + + "\x13deprecation_warning\x18\x03 \x01(\tR\x12deprecationWarning\x12A\n" + + "\x0fedition_removed\x18\x04 \x01(\x0e2\x18.google.protobuf.EditionR\x0eeditionRemoved\"/\n" + + "\x05CType\x12\n" + + "\n" + + "\x06STRING\x10\x00\x12\b\n" + + "\x04CORD\x10\x01\x12\x10\n" + + "\fSTRING_PIECE\x10\x02\"5\n" + + "\x06JSType\x12\r\n" + + "\tJS_NORMAL\x10\x00\x12\r\n" + + "\tJS_STRING\x10\x01\x12\r\n" + + "\tJS_NUMBER\x10\x02\"U\n" + + "\x0fOptionRetention\x12\x15\n" + + "\x11RETENTION_UNKNOWN\x10\x00\x12\x15\n" + + "\x11RETENTION_RUNTIME\x10\x01\x12\x14\n" + + "\x10RETENTION_SOURCE\x10\x02\"\x8c\x02\n" + + "\x10OptionTargetType\x12\x17\n" + + "\x13TARGET_TYPE_UNKNOWN\x10\x00\x12\x14\n" + + "\x10TARGET_TYPE_FILE\x10\x01\x12\x1f\n" + + "\x1bTARGET_TYPE_EXTENSION_RANGE\x10\x02\x12\x17\n" + + "\x13TARGET_TYPE_MESSAGE\x10\x03\x12\x15\n" + + "\x11TARGET_TYPE_FIELD\x10\x04\x12\x15\n" + + "\x11TARGET_TYPE_ONEOF\x10\x05\x12\x14\n" + + "\x10TARGET_TYPE_ENUM\x10\x06\x12\x1a\n" + + "\x16TARGET_TYPE_ENUM_ENTRY\x10\a\x12\x17\n" + + "\x13TARGET_TYPE_SERVICE\x10\b\x12\x16\n" + + "\x12TARGET_TYPE_METHOD\x10\t*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x04\x10\x05J\x04\b\x12\x10\x13\"\xac\x01\n" + + "\fOneofOptions\x127\n" + + "\bfeatures\x18\x01 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xd1\x02\n" + + "\vEnumOptions\x12\x1f\n" + + "\vallow_alias\x18\x02 \x01(\bR\n" + + "allowAlias\x12%\n" + + "\n" + + "deprecated\x18\x03 \x01(\b:\x05falseR\n" + + "deprecated\x12V\n" + + 
"&deprecated_legacy_json_field_conflicts\x18\x06 \x01(\bB\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x127\n" + + "\bfeatures\x18\a \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x05\x10\x06\"\xd8\x02\n" + + "\x10EnumValueOptions\x12%\n" + + "\n" + + "deprecated\x18\x01 \x01(\b:\x05falseR\n" + + "deprecated\x127\n" + + "\bfeatures\x18\x02 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12(\n" + + "\fdebug_redact\x18\x03 \x01(\b:\x05falseR\vdebugRedact\x12U\n" + + "\x0ffeature_support\x18\x04 \x01(\v2,.google.protobuf.FieldOptions.FeatureSupportR\x0efeatureSupport\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xd5\x01\n" + + "\x0eServiceOptions\x127\n" + + "\bfeatures\x18\" \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12%\n" + + "\n" + + "deprecated\x18! \x01(\b:\x05falseR\n" + + "deprecated\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\x99\x03\n" + + "\rMethodOptions\x12%\n" + + "\n" + + "deprecated\x18! \x01(\b:\x05falseR\n" + + "deprecated\x12q\n" + + "\x11idempotency_level\x18\" \x01(\x0e2/.google.protobuf.MethodOptions.IdempotencyLevel:\x13IDEMPOTENCY_UNKNOWNR\x10idempotencyLevel\x127\n" + + "\bfeatures\x18# \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\"P\n" + + "\x10IdempotencyLevel\x12\x17\n" + + "\x13IDEMPOTENCY_UNKNOWN\x10\x00\x12\x13\n" + + "\x0fNO_SIDE_EFFECTS\x10\x01\x12\x0e\n" + + "\n" + + "IDEMPOTENT\x10\x02*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\x9a\x03\n" + + "\x13UninterpretedOption\x12A\n" + + "\x04name\x18\x02 \x03(\v2-.google.protobuf.UninterpretedOption.NamePartR\x04name\x12)\n" + + "\x10identifier_value\x18\x03 \x01(\tR\x0fidentifierValue\x12,\n" + + "\x12positive_int_value\x18\x04 \x01(\x04R\x10positiveIntValue\x12,\n" + + "\x12negative_int_value\x18\x05 \x01(\x03R\x10negativeIntValue\x12!\n" + + "\fdouble_value\x18\x06 \x01(\x01R\vdoubleValue\x12!\n" + + "\fstring_value\x18\a \x01(\fR\vstringValue\x12'\n" + + "\x0faggregate_value\x18\b \x01(\tR\x0eaggregateValue\x1aJ\n" + + "\bNamePart\x12\x1b\n" + + "\tname_part\x18\x01 \x02(\tR\bnamePart\x12!\n" + + "\fis_extension\x18\x02 \x02(\bR\visExtension\"\xae\f\n" + + "\n" + + "FeatureSet\x12\x91\x01\n" + + "\x0efield_presence\x18\x01 \x01(\x0e2).google.protobuf.FeatureSet.FieldPresenceB?\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\bEXPLICIT\x18\x84\a\xa2\x01\r\x12\bIMPLICIT\x18\xe7\a\xa2\x01\r\x12\bEXPLICIT\x18\xe8\a\xb2\x01\x03\b\xe8\aR\rfieldPresence\x12l\n" + + "\tenum_type\x18\x02 \x01(\x0e2$.google.protobuf.FeatureSet.EnumTypeB)\x88\x01\x01\x98\x01\x06\x98\x01\x01\xa2\x01\v\x12\x06CLOSED\x18\x84\a\xa2\x01\t\x12\x04OPEN\x18\xe7\a\xb2\x01\x03\b\xe8\aR\benumType\x12\x98\x01\n" + + "\x17repeated_field_encoding\x18\x03 \x01(\x0e21.google.protobuf.FeatureSet.RepeatedFieldEncodingB-\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\bEXPANDED\x18\x84\a\xa2\x01\v\x12\x06PACKED\x18\xe7\a\xb2\x01\x03\b\xe8\aR\x15repeatedFieldEncoding\x12~\n" + + "\x0futf8_validation\x18\x04 
\x01(\x0e2*.google.protobuf.FeatureSet.Utf8ValidationB)\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\t\x12\x04NONE\x18\x84\a\xa2\x01\v\x12\x06VERIFY\x18\xe7\a\xb2\x01\x03\b\xe8\aR\x0eutf8Validation\x12~\n" + + "\x10message_encoding\x18\x05 \x01(\x0e2+.google.protobuf.FeatureSet.MessageEncodingB&\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\x14\x12\x0fLENGTH_PREFIXED\x18\x84\a\xb2\x01\x03\b\xe8\aR\x0fmessageEncoding\x12\x82\x01\n" + + "\vjson_format\x18\x06 \x01(\x0e2&.google.protobuf.FeatureSet.JsonFormatB9\x88\x01\x01\x98\x01\x03\x98\x01\x06\x98\x01\x01\xa2\x01\x17\x12\x12LEGACY_BEST_EFFORT\x18\x84\a\xa2\x01\n" + + "\x12\x05ALLOW\x18\xe7\a\xb2\x01\x03\b\xe8\aR\n" + + "jsonFormat\x12\xab\x01\n" + + "\x14enforce_naming_style\x18\a \x01(\x0e2..google.protobuf.FeatureSet.EnforceNamingStyleBI\x88\x01\x02\x98\x01\x01\x98\x01\x02\x98\x01\x03\x98\x01\x04\x98\x01\x05\x98\x01\x06\x98\x01\a\x98\x01\b\x98\x01\t\xa2\x01\x11\x12\fSTYLE_LEGACY\x18\x84\a\xa2\x01\x0e\x12\tSTYLE2024\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x12enforceNamingStyle\"\\\n" + + "\rFieldPresence\x12\x1a\n" + + "\x16FIELD_PRESENCE_UNKNOWN\x10\x00\x12\f\n" + + "\bEXPLICIT\x10\x01\x12\f\n" + + "\bIMPLICIT\x10\x02\x12\x13\n" + + "\x0fLEGACY_REQUIRED\x10\x03\"7\n" + + "\bEnumType\x12\x15\n" + + "\x11ENUM_TYPE_UNKNOWN\x10\x00\x12\b\n" + + "\x04OPEN\x10\x01\x12\n" + + "\n" + + "\x06CLOSED\x10\x02\"V\n" + + "\x15RepeatedFieldEncoding\x12#\n" + + "\x1fREPEATED_FIELD_ENCODING_UNKNOWN\x10\x00\x12\n" + + "\n" + + "\x06PACKED\x10\x01\x12\f\n" + + "\bEXPANDED\x10\x02\"I\n" + + "\x0eUtf8Validation\x12\x1b\n" + + "\x17UTF8_VALIDATION_UNKNOWN\x10\x00\x12\n" + + "\n" + + "\x06VERIFY\x10\x02\x12\b\n" + + "\x04NONE\x10\x03\"\x04\b\x01\x10\x01\"S\n" + + "\x0fMessageEncoding\x12\x1c\n" + + "\x18MESSAGE_ENCODING_UNKNOWN\x10\x00\x12\x13\n" + + "\x0fLENGTH_PREFIXED\x10\x01\x12\r\n" + + "\tDELIMITED\x10\x02\"H\n" + + "\n" + + "JsonFormat\x12\x17\n" + + "\x13JSON_FORMAT_UNKNOWN\x10\x00\x12\t\n" + + "\x05ALLOW\x10\x01\x12\x16\n" + + "\x12LEGACY_BEST_EFFORT\x10\x02\"W\n" + + "\x12EnforceNamingStyle\x12 \n" + + "\x1cENFORCE_NAMING_STYLE_UNKNOWN\x10\x00\x12\r\n" + + "\tSTYLE2024\x10\x01\x12\x10\n" + + "\fSTYLE_LEGACY\x10\x02*\x06\b\xe8\a\x10\x8bN*\x06\b\x8bN\x10\x90N*\x06\b\x90N\x10\x91NJ\x06\b\xe7\a\x10\xe8\a\"\xef\x03\n" + + "\x12FeatureSetDefaults\x12X\n" + + "\bdefaults\x18\x01 \x03(\v2<.google.protobuf.FeatureSetDefaults.FeatureSetEditionDefaultR\bdefaults\x12A\n" + + "\x0fminimum_edition\x18\x04 \x01(\x0e2\x18.google.protobuf.EditionR\x0eminimumEdition\x12A\n" + + "\x0fmaximum_edition\x18\x05 \x01(\x0e2\x18.google.protobuf.EditionR\x0emaximumEdition\x1a\xf8\x01\n" + + "\x18FeatureSetEditionDefault\x122\n" + + "\aedition\x18\x03 \x01(\x0e2\x18.google.protobuf.EditionR\aedition\x12N\n" + + "\x14overridable_features\x18\x04 \x01(\v2\x1b.google.protobuf.FeatureSetR\x13overridableFeatures\x12B\n" + + "\x0efixed_features\x18\x05 \x01(\v2\x1b.google.protobuf.FeatureSetR\rfixedFeaturesJ\x04\b\x01\x10\x02J\x04\b\x02\x10\x03R\bfeatures\"\xb5\x02\n" + + "\x0eSourceCodeInfo\x12D\n" + + "\blocation\x18\x01 \x03(\v2(.google.protobuf.SourceCodeInfo.LocationR\blocation\x1a\xce\x01\n" + + "\bLocation\x12\x16\n" + + "\x04path\x18\x01 \x03(\x05B\x02\x10\x01R\x04path\x12\x16\n" + + "\x04span\x18\x02 \x03(\x05B\x02\x10\x01R\x04span\x12)\n" + + "\x10leading_comments\x18\x03 \x01(\tR\x0fleadingComments\x12+\n" + + "\x11trailing_comments\x18\x04 \x01(\tR\x10trailingComments\x12:\n" + + "\x19leading_detached_comments\x18\x06 
\x03(\tR\x17leadingDetachedComments*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\xd0\x02\n" + + "\x11GeneratedCodeInfo\x12M\n" + + "\n" + + "annotation\x18\x01 \x03(\v2-.google.protobuf.GeneratedCodeInfo.AnnotationR\n" + + "annotation\x1a\xeb\x01\n" + + "\n" + + "Annotation\x12\x16\n" + + "\x04path\x18\x01 \x03(\x05B\x02\x10\x01R\x04path\x12\x1f\n" + + "\vsource_file\x18\x02 \x01(\tR\n" + + "sourceFile\x12\x14\n" + + "\x05begin\x18\x03 \x01(\x05R\x05begin\x12\x10\n" + + "\x03end\x18\x04 \x01(\x05R\x03end\x12R\n" + + "\bsemantic\x18\x05 \x01(\x0e26.google.protobuf.GeneratedCodeInfo.Annotation.SemanticR\bsemantic\"(\n" + + "\bSemantic\x12\b\n" + + "\x04NONE\x10\x00\x12\a\n" + + "\x03SET\x10\x01\x12\t\n" + + "\x05ALIAS\x10\x02*\xa7\x02\n" + + "\aEdition\x12\x13\n" + + "\x0fEDITION_UNKNOWN\x10\x00\x12\x13\n" + + "\x0eEDITION_LEGACY\x10\x84\a\x12\x13\n" + + "\x0eEDITION_PROTO2\x10\xe6\a\x12\x13\n" + + "\x0eEDITION_PROTO3\x10\xe7\a\x12\x11\n" + + "\fEDITION_2023\x10\xe8\a\x12\x11\n" + + "\fEDITION_2024\x10\xe9\a\x12\x17\n" + + "\x13EDITION_1_TEST_ONLY\x10\x01\x12\x17\n" + + "\x13EDITION_2_TEST_ONLY\x10\x02\x12\x1d\n" + + "\x17EDITION_99997_TEST_ONLY\x10\x9d\x8d\x06\x12\x1d\n" + + "\x17EDITION_99998_TEST_ONLY\x10\x9e\x8d\x06\x12\x1d\n" + + "\x17EDITION_99999_TEST_ONLY\x10\x9f\x8d\x06\x12\x13\n" + + "\vEDITION_MAX\x10\xff\xff\xff\xff\aB~\n" + + "\x13com.google.protobufB\x10DescriptorProtosH\x01Z-google.golang.org/protobuf/types/descriptorpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1aGoogle.Protobuf.Reflection" var ( file_google_protobuf_descriptor_proto_rawDescOnce sync.Once @@ -5145,7 +4832,7 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { return file_google_protobuf_descriptor_proto_rawDescData } -var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17) +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 18) var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 33) var file_google_protobuf_descriptor_proto_goTypes = []any{ (Edition)(0), // 0: google.protobuf.Edition @@ -5164,124 +4851,126 @@ var file_google_protobuf_descriptor_proto_goTypes = []any{ (FeatureSet_Utf8Validation)(0), // 13: google.protobuf.FeatureSet.Utf8Validation (FeatureSet_MessageEncoding)(0), // 14: google.protobuf.FeatureSet.MessageEncoding (FeatureSet_JsonFormat)(0), // 15: google.protobuf.FeatureSet.JsonFormat - (GeneratedCodeInfo_Annotation_Semantic)(0), // 16: google.protobuf.GeneratedCodeInfo.Annotation.Semantic - (*FileDescriptorSet)(nil), // 17: google.protobuf.FileDescriptorSet - (*FileDescriptorProto)(nil), // 18: google.protobuf.FileDescriptorProto - (*DescriptorProto)(nil), // 19: google.protobuf.DescriptorProto - (*ExtensionRangeOptions)(nil), // 20: google.protobuf.ExtensionRangeOptions - (*FieldDescriptorProto)(nil), // 21: google.protobuf.FieldDescriptorProto - (*OneofDescriptorProto)(nil), // 22: google.protobuf.OneofDescriptorProto - (*EnumDescriptorProto)(nil), // 23: google.protobuf.EnumDescriptorProto - (*EnumValueDescriptorProto)(nil), // 24: google.protobuf.EnumValueDescriptorProto - (*ServiceDescriptorProto)(nil), // 25: google.protobuf.ServiceDescriptorProto - (*MethodDescriptorProto)(nil), // 26: google.protobuf.MethodDescriptorProto - (*FileOptions)(nil), // 27: google.protobuf.FileOptions - (*MessageOptions)(nil), // 28: google.protobuf.MessageOptions - (*FieldOptions)(nil), // 29: google.protobuf.FieldOptions - (*OneofOptions)(nil), // 30: google.protobuf.OneofOptions - (*EnumOptions)(nil), 
// 31: google.protobuf.EnumOptions - (*EnumValueOptions)(nil), // 32: google.protobuf.EnumValueOptions - (*ServiceOptions)(nil), // 33: google.protobuf.ServiceOptions - (*MethodOptions)(nil), // 34: google.protobuf.MethodOptions - (*UninterpretedOption)(nil), // 35: google.protobuf.UninterpretedOption - (*FeatureSet)(nil), // 36: google.protobuf.FeatureSet - (*FeatureSetDefaults)(nil), // 37: google.protobuf.FeatureSetDefaults - (*SourceCodeInfo)(nil), // 38: google.protobuf.SourceCodeInfo - (*GeneratedCodeInfo)(nil), // 39: google.protobuf.GeneratedCodeInfo - (*DescriptorProto_ExtensionRange)(nil), // 40: google.protobuf.DescriptorProto.ExtensionRange - (*DescriptorProto_ReservedRange)(nil), // 41: google.protobuf.DescriptorProto.ReservedRange - (*ExtensionRangeOptions_Declaration)(nil), // 42: google.protobuf.ExtensionRangeOptions.Declaration - (*EnumDescriptorProto_EnumReservedRange)(nil), // 43: google.protobuf.EnumDescriptorProto.EnumReservedRange - (*FieldOptions_EditionDefault)(nil), // 44: google.protobuf.FieldOptions.EditionDefault - (*FieldOptions_FeatureSupport)(nil), // 45: google.protobuf.FieldOptions.FeatureSupport - (*UninterpretedOption_NamePart)(nil), // 46: google.protobuf.UninterpretedOption.NamePart - (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 47: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - (*SourceCodeInfo_Location)(nil), // 48: google.protobuf.SourceCodeInfo.Location - (*GeneratedCodeInfo_Annotation)(nil), // 49: google.protobuf.GeneratedCodeInfo.Annotation + (FeatureSet_EnforceNamingStyle)(0), // 16: google.protobuf.FeatureSet.EnforceNamingStyle + (GeneratedCodeInfo_Annotation_Semantic)(0), // 17: google.protobuf.GeneratedCodeInfo.Annotation.Semantic + (*FileDescriptorSet)(nil), // 18: google.protobuf.FileDescriptorSet + (*FileDescriptorProto)(nil), // 19: google.protobuf.FileDescriptorProto + (*DescriptorProto)(nil), // 20: google.protobuf.DescriptorProto + (*ExtensionRangeOptions)(nil), // 21: google.protobuf.ExtensionRangeOptions + (*FieldDescriptorProto)(nil), // 22: google.protobuf.FieldDescriptorProto + (*OneofDescriptorProto)(nil), // 23: google.protobuf.OneofDescriptorProto + (*EnumDescriptorProto)(nil), // 24: google.protobuf.EnumDescriptorProto + (*EnumValueDescriptorProto)(nil), // 25: google.protobuf.EnumValueDescriptorProto + (*ServiceDescriptorProto)(nil), // 26: google.protobuf.ServiceDescriptorProto + (*MethodDescriptorProto)(nil), // 27: google.protobuf.MethodDescriptorProto + (*FileOptions)(nil), // 28: google.protobuf.FileOptions + (*MessageOptions)(nil), // 29: google.protobuf.MessageOptions + (*FieldOptions)(nil), // 30: google.protobuf.FieldOptions + (*OneofOptions)(nil), // 31: google.protobuf.OneofOptions + (*EnumOptions)(nil), // 32: google.protobuf.EnumOptions + (*EnumValueOptions)(nil), // 33: google.protobuf.EnumValueOptions + (*ServiceOptions)(nil), // 34: google.protobuf.ServiceOptions + (*MethodOptions)(nil), // 35: google.protobuf.MethodOptions + (*UninterpretedOption)(nil), // 36: google.protobuf.UninterpretedOption + (*FeatureSet)(nil), // 37: google.protobuf.FeatureSet + (*FeatureSetDefaults)(nil), // 38: google.protobuf.FeatureSetDefaults + (*SourceCodeInfo)(nil), // 39: google.protobuf.SourceCodeInfo + (*GeneratedCodeInfo)(nil), // 40: google.protobuf.GeneratedCodeInfo + (*DescriptorProto_ExtensionRange)(nil), // 41: google.protobuf.DescriptorProto.ExtensionRange + (*DescriptorProto_ReservedRange)(nil), // 42: google.protobuf.DescriptorProto.ReservedRange + (*ExtensionRangeOptions_Declaration)(nil), 
// 43: google.protobuf.ExtensionRangeOptions.Declaration + (*EnumDescriptorProto_EnumReservedRange)(nil), // 44: google.protobuf.EnumDescriptorProto.EnumReservedRange + (*FieldOptions_EditionDefault)(nil), // 45: google.protobuf.FieldOptions.EditionDefault + (*FieldOptions_FeatureSupport)(nil), // 46: google.protobuf.FieldOptions.FeatureSupport + (*UninterpretedOption_NamePart)(nil), // 47: google.protobuf.UninterpretedOption.NamePart + (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 48: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + (*SourceCodeInfo_Location)(nil), // 49: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 50: google.protobuf.GeneratedCodeInfo.Annotation } var file_google_protobuf_descriptor_proto_depIdxs = []int32{ - 18, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto - 19, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto - 23, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 25, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto - 21, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 27, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions - 38, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo + 19, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto + 20, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto + 24, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 26, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto + 22, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 28, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions + 39, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo 0, // 7: google.protobuf.FileDescriptorProto.edition:type_name -> google.protobuf.Edition - 21, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto - 21, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 19, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto - 23, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 40, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange - 22, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto - 28, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions - 41, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange - 35, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 42, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration - 36, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> 
google.protobuf.FeatureSet + 22, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto + 22, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 20, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto + 24, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 41, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange + 23, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto + 29, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions + 42, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange + 36, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 43, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration + 37, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet 1, // 19: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState 3, // 20: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label 2, // 21: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type - 29, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions - 30, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions - 24, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto - 31, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions - 43, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange - 32, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions - 26, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto - 33, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions - 34, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 30, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions + 31, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions + 25, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto + 32, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions + 44, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange + 33, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions + 27, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto + 34, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions + 35, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions 4, // 31: 
google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode - 36, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 37, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet + 36, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 37, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet + 36, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption 5, // 36: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType 6, // 37: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType 7, // 38: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention 8, // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType - 44, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault - 36, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet - 45, // 42: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport - 35, // 43: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 44: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 45: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet - 45, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport - 35, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 45, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault + 37, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet + 46, // 42: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 36, // 43: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 37, // 44: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet + 36, // 45: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 37, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet + 36, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 37, // 48: 
google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet
+	46, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
+	36, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	37, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet
+	36, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
 	9,  // 53: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel
-	36, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet
-	35, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	46, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
+	37, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet
+	36, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	47, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
 	10, // 57: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence
 	11, // 58: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType
 	12, // 59: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding
 	13, // 60: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation
 	14, // 61: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding
 	15, // 62: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat
-	47, // 63: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
-	0,  // 64: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition
-	0,  // 65: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition
-	48, // 66: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location
-	49, // 67: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation
-	20, // 68: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions
-	0,  // 69: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition
-	0,  // 70: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition
-	0,  // 71: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition
-	0,  // 72: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition
-	0,  // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition
-	36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet
-	36, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet
-	16, // 76: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic
-	77, // [77:77] is the sub-list for method output_type
-	77, // [77:77] is the sub-list for method input_type
-	77, // [77:77] is the sub-list for extension type_name
-	77, // [77:77] is the sub-list for extension extendee
-	0,  // [0:77] is the sub-list for field type_name
+	16, // 63: google.protobuf.FeatureSet.enforce_naming_style:type_name -> google.protobuf.FeatureSet.EnforceNamingStyle
+	48, // 64: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
+	0,  // 65: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition
+	0,  // 66: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition
+	49, // 67: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location
+	50, // 68: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation
+	21, // 69: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions
+	0,  // 70: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition
+	0,  // 71: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition
+	0,  // 72: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition
+	0,  // 73: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition
+	0,  // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition
+	37, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet
+	37, // 76: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet
+	17, // 77: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic
+	78, // [78:78] is the sub-list for method output_type
+	78, // [78:78] is the sub-list for method input_type
+	78, // [78:78] is the sub-list for extension type_name
+	78, // [78:78] is the sub-list for extension extendee
+	0,  // [0:78] is the sub-list for field type_name
 }
 
 func init() { file_google_protobuf_descriptor_proto_init() }
@@ -5294,7 +4983,7 @@ func file_google_protobuf_descriptor_proto_init() {
 		File: protoimpl.DescBuilder{
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
 			RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc)),
-			NumEnums:      17,
+			NumEnums:      18,
 			NumMessages:   33,
 			NumExtensions: 0,
 			NumServices:   0,
diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
index 497da66e91..1ff0d1494d 100644
--- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
@@ -412,23 +412,13 @@ func (x *Any) GetValue() []byte {
 
 var File_google_protobuf_any_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_any_proto_rawDesc = string([]byte{
-	0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x36, 0x0a, 0x03,
-	0x41, 0x6e, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18,
-	0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x14,
-	0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76,
-	0x61, 0x6c, 0x75, 0x65, 0x42, 0x76, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x08, 0x41, 0x6e, 0x79,
-	0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f,
-	0x61, 0x6e, 0x79, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65,
-	0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x33,
-})
+const file_google_protobuf_any_proto_rawDesc = "" +
+	"\n" +
+	"\x19google/protobuf/any.proto\x12\x0fgoogle.protobuf\"6\n" +
+	"\x03Any\x12\x19\n" +
+	"\btype_url\x18\x01 \x01(\tR\atypeUrl\x12\x14\n" +
+	"\x05value\x18\x02 \x01(\fR\x05valueBv\n" +
+	"\x13com.google.protobufB\bAnyProtoP\x01Z,google.golang.org/protobuf/types/known/anypb\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
 
 var (
 	file_google_protobuf_any_proto_rawDescOnce sync.Once
diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
index 00ac835c0b..06d584c14b 100644
--- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
@@ -298,24 +298,13 @@ func (x *Timestamp) GetNanos() int32 {
 
 var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_timestamp_proto_rawDesc = string([]byte{
-	0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
-	0x75, 0x66, 0x22, 0x3b, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12,
-	0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
-	0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e,
-	0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42,
-	0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
-	0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77,
-	0x6e, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x70, 0x62, 0xf8, 0x01, 0x01,
-	0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f,
-	0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-})
+const file_google_protobuf_timestamp_proto_rawDesc = "" +
+	"\n" +
+	"\x1fgoogle/protobuf/timestamp.proto\x12\x0fgoogle.protobuf\";\n" +
+	"\tTimestamp\x12\x18\n" +
+	"\aseconds\x18\x01 \x01(\x03R\aseconds\x12\x14\n" +
+	"\x05nanos\x18\x02 \x01(\x05R\x05nanosB\x85\x01\n" +
+	"\x13com.google.protobufB\x0eTimestampProtoP\x01Z2google.golang.org/protobuf/types/known/timestamppb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
 
 var (
 	file_google_protobuf_timestamp_proto_rawDescOnce sync.Once
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 3235d65642..598f1361cb 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -184,26 +184,20 @@ github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer
 # github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
 ## explicit
 github.com/davecgh/go-spew/spew
-# github.com/distribution/distribution/v3 v3.0.0-20230519140516-983358f8e250
-## explicit; go 1.18
+# github.com/distribution/distribution/v3 v3.0.0
+## explicit; go 1.23.7
 github.com/distribution/distribution/v3
-github.com/distribution/distribution/v3/context
+github.com/distribution/distribution/v3/internal/dcontext
+github.com/distribution/distribution/v3/internal/requestutil
 github.com/distribution/distribution/v3/manifest
 github.com/distribution/distribution/v3/manifest/manifestlist
 github.com/distribution/distribution/v3/manifest/ocischema
-github.com/distribution/distribution/v3/manifest/schema1
 github.com/distribution/distribution/v3/manifest/schema2
 github.com/distribution/distribution/v3/metrics
-github.com/distribution/distribution/v3/reference
 github.com/distribution/distribution/v3/registry/api/errcode
 github.com/distribution/distribution/v3/registry/api/v2
-github.com/distribution/distribution/v3/registry/client
-github.com/distribution/distribution/v3/registry/client/auth
-github.com/distribution/distribution/v3/registry/client/auth/challenge
-github.com/distribution/distribution/v3/registry/client/transport
 github.com/distribution/distribution/v3/registry/storage/cache
 github.com/distribution/distribution/v3/registry/storage/cache/memory
-github.com/distribution/distribution/v3/uuid
 # github.com/distribution/reference v0.6.0
 ## explicit; go 1.20
 github.com/distribution/reference
@@ -230,7 +224,7 @@ github.com/docker/docker/pkg/longpath
 github.com/docker/docker/pkg/pools
 github.com/docker/docker/pkg/stdcopy
 github.com/docker/docker/pkg/system
-# github.com/docker/docker-credential-helpers v0.8.1
+# github.com/docker/docker-credential-helpers v0.8.2
 ## explicit; go 1.19
 github.com/docker/docker-credential-helpers/client
 github.com/docker/docker-credential-helpers/credentials
@@ -306,7 +300,7 @@ github.com/go-jose/go-jose/v4/json
 # github.com/go-ldap/ldap/v3 v3.4.3
 ## explicit; go 1.13
 github.com/go-ldap/ldap/v3
-# github.com/go-logr/logr v1.4.2
+# github.com/go-logr/logr v1.4.3
 ## explicit; go 1.18
 github.com/go-logr/logr
 # github.com/go-openapi/analysis v0.21.4
@@ -439,10 +433,13 @@ github.com/hashicorp/errwrap
 # github.com/hashicorp/go-multierror v1.1.1
 ## explicit; go 1.13
 github.com/hashicorp/go-multierror
-# github.com/hashicorp/golang-lru v1.0.2
-## explicit; go 1.12
-github.com/hashicorp/golang-lru
-github.com/hashicorp/golang-lru/simplelru
+# github.com/hashicorp/golang-lru/arc/v2 v2.0.7
+## explicit; go 1.18
+github.com/hashicorp/golang-lru/arc/v2
+# github.com/hashicorp/golang-lru/v2 v2.0.7
+## explicit; go 1.18
+github.com/hashicorp/golang-lru/v2/internal
+github.com/hashicorp/golang-lru/v2/simplelru
 # github.com/inconshreveable/mousetrap v1.1.0
 ## explicit; go 1.18
 github.com/inconshreveable/mousetrap
@@ -563,7 +560,7 @@ github.com/oklog/ulid
 # github.com/opencontainers/go-digest v1.0.0
 ## explicit; go 1.13
 github.com/opencontainers/go-digest
-# github.com/opencontainers/image-spec v1.1.0
+# github.com/opencontainers/image-spec v1.1.1
 ## explicit; go 1.18
 github.com/opencontainers/image-spec/specs-go
 github.com/opencontainers/image-spec/specs-go/v1
@@ -803,7 +800,6 @@ github.com/openshift/library-go/pkg/image/internal/digest
 github.com/openshift/library-go/pkg/image/internal/reference
 github.com/openshift/library-go/pkg/image/reference
 github.com/openshift/library-go/pkg/image/referencemutator
-github.com/openshift/library-go/pkg/image/registryclient
 github.com/openshift/library-go/pkg/image/trigger
 github.com/openshift/library-go/pkg/legacyapi/legacygroupification
 github.com/openshift/library-go/pkg/manifest
@@ -837,6 +833,17 @@ github.com/openshift/library-go/pkg/verify/store/parallel
 github.com/openshift/library-go/pkg/verify/store/serial
 github.com/openshift/library-go/pkg/verify/store/sigstore
 github.com/openshift/library-go/pkg/verify/util
+# github.com/openshift/library-go/pkg/image/registryclient/v2 v2.0.0-00010101000000-000000000000 => github.com/tchap/library-go/pkg/image/registryclient/v2 v2.0.0-20250722083015-8716d02a8a94
+## explicit; go 1.24.0
+github.com/openshift/library-go/pkg/image/registryclient/v2
+github.com/openshift/library-go/pkg/image/registryclient/v2/auth
+github.com/openshift/library-go/pkg/image/registryclient/v2/clienterrors
+github.com/openshift/library-go/pkg/image/registryclient/v2/internal/client
+github.com/openshift/library-go/pkg/image/registryclient/v2/internal/client/auth
+github.com/openshift/library-go/pkg/image/registryclient/v2/internal/client/auth/challenge
+github.com/openshift/library-go/pkg/image/registryclient/v2/internal/client/transport
+github.com/openshift/library-go/pkg/image/registryclient/v2/manifest/schema1
+github.com/openshift/library-go/pkg/image/registryclient/v2/transport
 # github.com/peterbourgon/diskv v2.0.1+incompatible
 ## explicit
 github.com/peterbourgon/diskv
@@ -864,15 +871,15 @@ github.com/prometheus/client_golang/prometheus/collectors
 github.com/prometheus/client_golang/prometheus/internal
 github.com/prometheus/client_golang/prometheus/promhttp
 github.com/prometheus/client_golang/prometheus/promhttp/internal
-# github.com/prometheus/client_model v0.6.1
-## explicit; go 1.19
+# github.com/prometheus/client_model v0.6.2
+## explicit; go 1.22.0
 github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.62.0
-## explicit; go 1.21
+# github.com/prometheus/common v0.65.0
+## explicit; go 1.23.0
 github.com/prometheus/common/expfmt
 github.com/prometheus/common/model
-# github.com/prometheus/procfs v0.15.1
-## explicit; go 1.20
+# github.com/prometheus/procfs v0.17.0
+## explicit; go 1.23.0
 github.com/prometheus/procfs
 github.com/prometheus/procfs/internal/fs
 github.com/prometheus/procfs/internal/util
@@ -1028,14 +1035,14 @@ golang.org/x/text/secure/bidirule
 golang.org/x/text/transform
 golang.org/x/text/unicode/bidi
 golang.org/x/text/unicode/norm
-# golang.org/x/time v0.9.0
-## explicit; go 1.18
+# golang.org/x/time v0.12.0
+## explicit; go 1.23.0
 golang.org/x/time/rate
 # golang.org/x/tools v0.34.0
 ## explicit; go 1.23.0
 golang.org/x/tools/container/intsets
-# google.golang.org/protobuf v1.36.5
-## explicit; go 1.21
+# google.golang.org/protobuf v1.36.6
+## explicit; go 1.22
 google.golang.org/protobuf/encoding/protodelim
 google.golang.org/protobuf/encoding/prototext
 google.golang.org/protobuf/encoding/protowire
@@ -1880,3 +1887,4 @@ sigs.k8s.io/yaml
 sigs.k8s.io/yaml/goyaml.v2
 sigs.k8s.io/yaml/goyaml.v3
 # github.com/apcera/gssapi => github.com/openshift/gssapi v0.0.0-20161010215902-5fb4217df13b
+# github.com/openshift/library-go/pkg/image/registryclient/v2 => github.com/tchap/library-go/pkg/image/registryclient/v2 v2.0.0-20250722083015-8716d02a8a94